Dataset columns:
  query            string, length 9 to 3.4k
  document         string, length 9 to 87.4k
  metadata         dict
  negatives        list, 4 to 101 items
  negative_scores  list, 4 to 101 items
  document_score   string, length 3 to 10
  document_rank    string, 102 distinct values
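For reference, a minimal sketch of how a single row with this schema could be typed in Python; the TypedDict name and the concrete element types are assumptions inferred from the column summary above rather than anything declared by the dataset itself.

from typing import List, TypedDict

class RetrievalRow(TypedDict):
    # Field names mirror the column summary; element types are inferred, not declared.
    query: str                     # natural-language query, 9 to ~3.4k characters
    document: str                  # positive document (usually code), 9 to ~87.4k characters
    metadata: dict                 # e.g. {"objective": {"triplet": [["query", "document", "negatives"]]}}
    negatives: List[str]           # 4 to 101 hard-negative documents
    negative_scores: List[float]   # one similarity score per negative
    document_score: str            # score of the positive document, stored as a string
    document_rank: str             # rank of the positive document among candidates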
does this source image have any face alts yet
def hasAnyFaces(self, uri):
    return self.graph.queryd(
        'ASK { ?uri pho:alternate ?alt . ?alt pho:tag "face" .}',
        initBindings={'uri': uri})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_faces(self):\n return len(self._faces) > 0", "def check_availability(img_path):\n # loading gray image\n gray_image = cv2.imread(img_path, 0)\n\n # check whether img give empty list or not\n flag = face_recognition.face_locations(gray_image)\n if flag:\n return True\n return False", "def detect_face(image):\n cascadePath = \"haarcascade_frontalface_default.xml\"\n faceCascade = cv2.CascadeClassifier(cascadePath)\n faces = faceCascade.detectMultiScale(image)\n if len(faces)>=1:#Should be == , not >=\n return True\n return False", "def known_face_detected(self, frame):\n faces_detected = face_recognition.face_encodings(frame)\n if len(faces_detected) > 0:\n unknown = face_recognition.face_encodings(frame)[0]\n results = face_recognition.compare_faces(self.known_faces, unknown)\n if True in results:\n logging.info('Known face detected')\n return True\n logging.info('Unknown face detected')\n return False", "def check_for_known_faces(self, frame):\n faces_detected = face_recognition.face_encodings(frame)\n if len(faces_detected) > 0:\n unknown = face_recognition.face_encodings(frame)[0]\n results = face_recognition.compare_faces(self.known_faces, unknown)\n if True in results:\n logging.info('Known face detected')\n return [self.faces_names[index] for index, value in enumerate(results)]\n logging.info('Unknown face detected')\n return False", "def face_detector(img_path: str):\r\n img = cv2.imread(img_path)\r\n\r\n # if no image at that path, return False\r\n if img is None:\r\n return False\r\n\r\n # convert to grey\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n # detect faces. If no face detected, it's empty and len(faces) will be 0\r\n faces = face_cascade.detectMultiScale(gray)\r\n return len(faces) > 0", "def is_existing_face(image, trackers, face):\n\n x1, y1, w1, h1 = face\n face_mask = np.zeros_like(image)\n face_mask[y1:y1+h1, x1:x1+w1] = 1\n for t in trackers:\n try:\n x,y,w,h = t.bounding_box\n t_mask = np.zeros_like(image)\n t_mask[y:y+h, x:x+w] = 1\n\n union = np.sum(np.bitwise_or(face_mask, t_mask))\n intersection = np.bitwise_and(face_mask, t_mask)\n if float(np.sum(intersection))/union > 0.3 or float(np.sum(intersection))/np.sum(t_mask+1) > 0.7:\n return (t, True)\n except Exception:\n pass\n \n return (None, False)", "def face_detector( img_path, face_cascade):\n img = cv2.imread( img_path )\n gray = cv2.cvtColor( img, cv2.COLOR_BGR2GRAY )\n faces = face_cascade.detectMultiScale( gray )\n if len( faces ) > 0:\n return True\n else:\n return False", "def has_picture(self):\n try:\n first = self.picture_planets()[0]\n except IndexError:\n first = None\n\n return first is not None", "def detect_face(self, img):\n # Fetch face location from the frame with 128 encoding of face landmarks\n curr_face_loc, name_list, info_list = load_encode_loc(img, self.kwn_names,\n self.kwn_encoding,\n self.status_list, self.since_list)\n print('Current value is ', curr_face_loc, name_list)\n face_list = []\n face_area = []\n print('face loc', curr_face_loc)\n if len(curr_face_loc):\n\n for (top, right, bottom, left), name in zip(curr_face_loc, name_list):\n print(top, right, bottom, left)\n cv2.rectangle(img, (top, right), (bottom, left), (0, 255, 2), 2)\n\n w = right - left\n h = bottom - top\n cx = left + w // 2\n cy = top + h // 2\n area = w * h\n\n for idx, info in enumerate(info_list):\n cv2.putText(img, info, (bottom, int(left * idx * 0.2)),\n cv2.FONT_HERSHEY_COMPLEX, 1,\n (0, 0, 255), 1)\n\n face_list.append([cx, cy])\n face_area.append(area)\n\n i = 
face_area.index(max(face_area))\n\n return img, [face_list[i], face_area[i]]\n\n else:\n return img, [[0, 0], 0]", "def faces_only(self):\n return self._faces_only", "def hasImage(self):\n if self.getImage():\n return True\n return False", "def has_none_planar_faces(self) -> bool:\n return not all(\n is_planar_face(face) for face in self.faces_as_vertices()\n )", "def __detect_face(self, img):\n gray = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY)\n return self.detector(gray, 1)", "def isFacesLoaded(self):\n return self.getCurrentCacheData()['isLoaded']", "def findFaces(self):\n\t\trects = self.detectAll()\n\t\tif len(rects)==0:\n\t\t\trects = []\n\t\telse:\n\t\t\trects[:, 2:] += rects[:, :2]\n\t\tself.analyzeFrame(rects)", "def detect_faces(self, img):\n with tf.Graph().as_default():\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=.7)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n with sess.as_default():\n pnet, rnet, onet = detect_face.create_mtcnn(sess, None)\n\n minsize = 20 # minimum size of face\n threshold = [ 0.6, 0.7, 0.7 ] # three steps's threshold\n factor = 0.709 # scale factor\n\n bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)\n\n nrof_faces = bounding_boxes.shape[0]\n img_size = np.asarray(img.shape)[0:2]\n\n faces = []\n faces_rects = []\n\n for i in range(nrof_faces):\n det = bounding_boxes[i,0:4]\n bb = np.zeros(4, dtype=np.int32)\n bb[0] = np.maximum(det[0]-5/2, 0)\n bb[1] = np.maximum(det[1]-5/2, 0)\n bb[2] = np.minimum(det[2]+5/2, img_size[1])\n bb[3] = np.minimum(det[3]+5/2, img_size[0])\n faces.append(img[bb[1]:bb[3], bb[0]:bb[2], :])\n faces_rects.append({'name': 'none', 'x': bb[0], 'y': bb[1], 'w': bb[2]-bb[0], 'h': bb[3]-bb[1]})\n\n return [img, faces, faces_rects]", "def has_images(self):\n return len(self.images) > 0", "def isGameOver(self):\n for i in range(self.rows):\n for j in range(self.columns):\n if self.grid[i][j].face == 'down':\n return False\n #if here then all cards must be face up\n return True", "def _load_known_face(self):\n faces_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'faces')\n faces = [os.path.join(faces_dir, f) for f in os.listdir(faces_dir) if f.endswith('.jpeg')]\n known_images = [face_recognition.load_image_file(i) for i in faces]\n self.known_faces = []\n for image in known_images:\n encoding = face_recognition.face_encodings(image)\n if len(encoding) > 0:\n logging.debug('Adding known face')\n self.known_faces.append(encoding[0])", "def detectFaces():\n faceEngine = VLFaceEngine()\n detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)\n\n imageWithOneFace = VLImage.load(filename=EXAMPLE_O)\n pprint.pprint(detector.detectOne(imageWithOneFace, detect5Landmarks=False, detect68Landmarks=False).asDict())\n detection = detector.detectOne(imageWithOneFace, detect5Landmarks=False, detect68Landmarks=False)\n pprint.pprint(detector.redetectOne(image=imageWithOneFace, bBox=detection))\n pprint.pprint(detector.redetectOne(image=imageWithOneFace, bBox=detection.boundingBox.rect))\n\n imageWithSeveralFaces = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)\n severalFaces = detector.detect([imageWithSeveralFaces], detect5Landmarks=False, detect68Landmarks=False)\n\n pprint.pprint(\n detector.redetect(\n images=[\n ImageForRedetection(imageWithSeveralFaces, [face.boundingBox.rect for face in severalFaces[0]]),\n ImageForRedetection(imageWithOneFace, [detection.boundingBox.rect]),\n 
ImageForRedetection(imageWithOneFace, [Rect(0, 0, 1, 1)]),\n ]\n )\n )", "def hasImage(self):\n return self._image is not None", "def detect_faces(self, image):\n return self.face_detector(image, 1)", "def hasImages(self):\n return len(self.getImages()) > 0", "def hasImages(self):\n return len(self.getImages()) > 0", "def check_camera(self):\n # -- 2. Read the video stream\n if not self.cap.isOpened:\n rospy.logerr(\"[FACE] Error opening video capture\")\n if not self.face_cascade.load(self.face_cascade_name):\n rospy.logerr(\"[FACE] Error loading face cascade\")\n if not self.eyes_cascade.load(self.eyes_cascade_name):\n rospy.logerr(\"[FACE] Error loading eye cascade\")", "def solved(self):\n return all(cell == 1 for row in self.faces for cell in row) or all(cell == 0 for row in self.faces for cell in row)", "def classify_face(im):\r\n faces = get_encoded_faces()\r\n faces_encoded = list(faces.values())\r\n known_face_names = list(faces.keys())\r\n\r\n img = cv2.imread(im, 1)\r\n \"\"\"\r\n Resize optinal \r\n \"\"\"\r\n #img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)\r\n #img = img[:,:,::-1]\r\n face_locations = face_recognition.face_locations(img)\r\n unknown_face_encodings = face_recognition.face_encodings(img, face_locations)\r\n\r\n face_names = []\r\n for face_encoding in unknown_face_encodings:\r\n # See if the face is a match for the known face(s)\r\n matches = face_recognition.compare_faces(faces_encoded, face_encoding)\r\n name = \"Unknown\"\r\n\r\n # use the known face with the smallest distance to the new face\r\n face_distances = face_recognition.face_distance(faces_encoded, face_encoding)\r\n best_match_index = np.argmin(face_distances)\r\n if matches[best_match_index]:\r\n name = known_face_names[best_match_index]\r\n\r\n face_names.append(name)\r\n\r\n \"\"\"\r\n All the photo lables in the faces foler end with (number) so a simiple .find(\"(\") command takes the () away from\r\n the label leaving us with the full name of the person\r\n\r\n \"\"\"\r\n\r\n result = name.find('(') \r\n fullname = (name[:result])\r\n \"\"\"\r\n If face_recogntion module recognizes a face but that face is not in the faces module then \r\n it will print unknown and we print 12345678 to use it on the start attednace program \r\n\r\n \"\"\"\r\n if (name == \"Unknown\"):\r\n print(\"12345678\")\r\n else:\r\n \"\"\"\r\n f'{len(face_locayion)}-people - will return the number of people in photo taken by Nao'\r\n \"\"\"\r\n print (f'{len(face_locations)}-people')\r\n print (fullname)\r\n print(courseid)\r\n print (lateornot)\r\n c34 = fullname.find(' ')\r\n firstname = (fullname[:c34])\r\n lastname = (fullname[c34:])\r\n \"\"\"\r\n We get all the data courseid , fristname , lastname, datetime1,and late or not and submited on the website \r\n \r\n\r\n \"\"\"\r\n login_data = {\r\n\t 'Course': courseid,\r\n\t 'FirstName': firstname,\r\n\t 'LastName': lastname,\r\n\t 'Date': datetime2,\r\n\t 'Attendance': 'on',\r\n\t 'Late': latev,\r\n\t 'submitbutton': 'Submit'\r\n }\r\n if(fullname == \"Unknow\"):\r\n \tprint(\"I-dont-know-you\")\r\n else:\r\n \r\n with requests.Session() as s:\r\n \turl = \"https://rbattendance.000webhostapp.com/update.php\"\r\n \tr = s.get(url)\r\n \tsoup = BeautifulSoup(r.content, 'html5lib')\r\n \tr = s.post(url, data = login_data)\r\n \t#print(r.content)\r\n \r\n \r\n\r\n\r\n\r\n\r\n \"\"\"\r\n This for loop is reponsible for drawing on the image \r\n \"\"\"\r\n\r\n for (top, right, bottom, left), name in zip(face_locations, face_names):\r\n # Draw a box around the face\r\n 
cv2.rectangle(img, (left-20, top-20), (right+20, bottom+20), (255, 0, 0), 2)\r\n\r\n # Draw a label with a name below the face\r\n cv2.rectangle(img, (left-20, bottom -15), (right+20, bottom+20), (255, 0, 0), cv2.FILLED)\r\n font = cv2.FONT_HERSHEY_DUPLEX\r\n cv2.putText(img, name, (left -20, bottom + 15), font, 1.0, (255, 255, 255), 2)\r\n\r\n\r\n # Display the resulting image\r\n \r\n \r\n while True:\r\n #cv2.imshow('Video', img)\r\n #if cv2.waitKey(1) & 0xFF == ord('q'):\r\n return face_names", "def read_known_faces():\n known_face_encodings = []\n known_face_names = []\n\n for file_name in glob.glob(DATASET_FOLDER + \"/*.jpg\"):\n face_encoding = read_face_encoding(file_name)\n\n known_face_encodings.append(face_encoding)\n\n name = file_name.split('.jpg')[0].split('/')[-1]\n if len(name.split('_')) != 2:\n raise Exception(\"\\n\\nERROR: file \\'\" + file_name + \"\\' has incorrect name\\n\\n\")\n\n known_face_names.append(name)\n\n return known_face_encodings, known_face_names", "def brain_has_lead_image(self, brain=None):", "def has_image(self):\n return hasattr(self, \"_image\") and self._image is not None", "def detect(self, frame): \n return self.__detect_faces(frame)", "def detect_face(self, img):\n #convert the test image to gray image as opencv face detector expects gray images\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n #let's detect multiscale (some images may be closer to camera than others) images\n #result is a list of faces\n faces = self.face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5);\n\n #if no faces are detected then return None\n if (len(faces) == 0):\n return None, None\n\n #under the assumption that there will be only one face,\n #extract the face area\n (x, y, w, h) = faces[0]\n\n #return only the face part of the image\n return gray[y:y+w, x:x+h], faces[0]", "def classify_face(im):\n faces_death = get_encoded_faces_deaths()\n faces_arrested = get_encoded_faces_arrested()\n faces_wanted = get_encoded_faces_wanted()\n\n faces_encoded_death = list(faces_death.values())\n known_face_names_death = list(faces_death.keys())\n\n faces_encoded_arrested = list(faces_arrested.values())\n known_face_names_arrested = list(faces_arrested.keys())\n\n faces_encoded_wanted = list(faces_wanted.values())\n known_face_names_wanted = list(faces_wanted.keys())\n\n img = cv2.imread(im, 1)\n face_locations = face_recognition.face_locations(img)\n unknown_face_encodings = face_recognition.face_encodings(img,face_locations)\n face_names = []\n find_in_db(im,known_face_names_death,unknown_face_encodings,face_names,faces_encoded_death,\"unnatural_death_images/unnatural_death_images\")\n find_in_db(im,known_face_names_arrested,unknown_face_encodings,face_names,faces_encoded_arrested,\"ArrestPerson_images\")\n find_in_db(im,known_face_names_wanted,unknown_face_encodings,face_names,faces_encoded_wanted,\"wanted\")", "def check_nfaces(sections):\n return _check_nentries(sections, \"NFACES\", \"FACES\")", "def face_is_plane(self, face)->bool:\n hs = BRep_Tool_Surface(face)\n downcast_result = Geom_Plane.DownCast(hs)\n # The handle is null if downcast failed or is not possible, that is to say the face is not a plane\n if downcast_result is None:\n return False\n else:\n return True", "def is3DImage(self):\n\t\treturn self.is3D", "def _load_known_faces(self):\n faces_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'faces')\n faces = [\n os.path.join(faces_dir, f) for f in os.listdir(faces_dir) \\\n if f.endswith('.jpeg') or f.endswith('.jpg') or 
f.endswith('.png')\n ]\n known_images = [face_recognition.load_image_file(i) for i in faces]\n self.known_faces = []\n self.faces_names = [x.split('/')[-1].split('.')[0].replace('_', ' ').title() for x in faces]\n for image in known_images:\n encoding = face_recognition.face_encodings(image)\n if len(encoding) > 0:\n logging.debug('Adding known face')\n self.known_faces.append(encoding[0])", "def detect_faces(image):\n\n face_locations = face_recognition.face_locations(image)\n return face_locations", "def cue_exist(image):\n for x1 in range(IMAGE_LEFT, IMAGE_RIGHT, 2):\n for y1 in range(IMAGE_TOP, IMAGE_DOWN, 25):\n point1 = (x1, y1)\n if is_cue_color(image.getpixel(point1)):\n\n left2 = max(IMAGE_LEFT, x1 - 70)\n right2 = min(IMAGE_RIGHT, x1 + 70)\n y2 = y1 + 25\n for x2 in range(left2, right2):\n point2 = (x2, y2)\n if is_cue_line(point1, point2, image):\n return True\n return False", "def detect_face_api(self, img):\n\n curr_face_loc, name_list, info_list = load_encode_loc(img, self.kwn_names,\n self.kwn_encoding,\n self.status_list, self.since_list)\n print('Current value is ', curr_face_loc, name_list)\n face_list = []\n face_area = []\n print('face loc', curr_face_loc)\n if len(curr_face_loc):\n\n for (top, right, bottom, left), name in zip(curr_face_loc, name_list):\n print(top, right, bottom, left)\n cv2.rectangle(img, (top, right), (bottom, left), (0, 255, 2), 2)\n\n w = right - left\n h = bottom - top\n cx = left + w // 2\n cy = top + h // 2\n area = w * h\n\n for idx, info in enumerate(info_list):\n cv2.putText(img, info, (bottom, int(left * idx * 0.2)),\n cv2.FONT_HERSHEY_COMPLEX, 1,\n (0, 0, 255), 1)\n\n face_list.append([cx, cy])\n face_area.append(area)\n\n i = face_area.index(max(face_area))\n\n return img, [face_list[i], face_area[i]]\n\n else:\n return img, [[0, 0], 0]", "def recognize_face(a_face):\r\n if not type(a_face) is TopoDS_Face:\r\n print(\"Please hit the 'G' key to switch to face selection mode\")\r\n return False\r\n surf = BRepAdaptor_Surface(a_face, True)\r\n surf_type = surf.GetType()\r\n if surf_type == GeomAbs_Plane:\r\n print(\"Identified Plane Geometry\")\r\n # look for the properties of the plane\r\n # first get the related gp_Pln\r\n gp_pln = surf.Plane()\r\n location = gp_pln.Location() # a point of the plane\r\n normal = gp_pln.Axis().Direction() # the plane normal\r\n # then export location and normal to the console output\r\n print(\r\n \"--> Location (global coordinates)\",\r\n location.X(),\r\n location.Y(),\r\n location.Z(),\r\n )\r\n print(\"--> Normal (global coordinates)\", normal.X(), normal.Y(), normal.Z())\r\n elif surf_type == GeomAbs_Cylinder:\r\n print(\"Identified Cylinder Geometry\")\r\n # look for the properties of the cylinder\r\n # first get the related gp_Cyl\r\n gp_cyl = surf.Cylinder()\r\n location = gp_cyl.Location() # a point of the axis\r\n axis = gp_cyl.Axis().Direction() # the cylinder axis\r\n # then export location and normal to the console output\r\n print(\r\n \"--> Location (global coordinates)\",\r\n location.X(),\r\n location.Y(),\r\n location.Z(),\r\n )\r\n print(\"--> Axis (global coordinates)\", axis.X(), axis.Y(), axis.Z())\r\n elif surf_type == GeomAbs_BSplineSurface:\r\n print(\"Identified BSplineSurface Geometry\")\r\n # gp_bsrf = surf.Surface()\r\n # degree = gp_bsrf.NbUKnots()\r\n # TODO use a model that provided BSplineSurfaces, as1_pe_203.stp only contains\r\n # planes and cylinders\r\n else:\r\n # TODO there are plenty other type that can be checked\r\n # see documentation for the BRepAdaptor class\r\n 
# https://www.opencascade.com/doc/occt-6.9.1/refman/html/class_b_rep_adaptor___surface.html\r\n print(surf_type, \"recognition not implemented\")", "def detect_faces(self, img) -> list:\r\n if img is None or not hasattr(img, \"shape\"):\r\n raise InvalidImage(\"Image not valid.\")\r\n\r\n height, width, _ = img.shape\r\n stage_status = StageStatus(width=width, height=height)\r\n\r\n m = 12 / self._min_face_size\r\n min_layer = np.amin([height, width]) * m\r\n\r\n scales = self.__compute_scale_pyramid(m, min_layer)\r\n\r\n stages = [self.__stage1, self.__stage2, self.__stage3]\r\n result = [scales, stage_status]\r\n\r\n # We pipe here each of the stages\r\n for stage in stages:\r\n result = stage(img, result[0], result[1])\r\n\r\n [total_boxes, points] = result\r\n\r\n bounding_boxes = []\r\n\r\n for bounding_box, keypoints in zip(total_boxes, points.T):\r\n x = max(0, int(bounding_box[0]))\r\n y = max(0, int(bounding_box[1]))\r\n width = int(bounding_box[2] - x)\r\n height = int(bounding_box[3] - y)\r\n bounding_boxes.append({\r\n 'box': [x, y, width, height],\r\n 'confidence': bounding_box[-1],\r\n 'keypoints': {\r\n 'left_eye': (int(keypoints[0]), int(keypoints[5])),\r\n 'right_eye': (int(keypoints[1]), int(keypoints[6])),\r\n 'nose': (int(keypoints[2]), int(keypoints[7])),\r\n 'mouth_left': (int(keypoints[3]), int(keypoints[8])),\r\n 'mouth_right': (int(keypoints[4]), int(keypoints[9])),\r\n }\r\n })\r\n\r\n return bounding_boxes", "def is_depth_image(self):\n return False", "def recognize_faces(image_file_path):\n image_pil = Image.open(image_file_path)\n draw = ImageDraw.Draw(image_pil)\n\n known_face_encodings_dict = get_known_face_encodings_dict()\n known_names = list(known_face_encodings_dict.keys())\n known_face_encodings = list(known_face_encodings_dict.values())\n\n del known_face_encodings_dict\n\n for face_location in face_detection.get_face_locations(image_file_path):\n face_encoding = get_face_encodings(\n image_file_path, known_face_locations=[face_location]\n )[0]\n\n recognition_flags = face_recognition.compare_faces(\n known_face_encodings, face_encoding\n )\n\n for flag, name in zip(recognition_flags, known_names):\n if not flag:\n continue\n\n top, right, bottom, left = face_location\n draw.rectangle((left, top, right, bottom), outline=\"#FF1493\")\n text_width, text_height = draw.textsize(name)\n draw.rectangle(\n (left, bottom, right, bottom + text_height + 10),\n fill=\"#FF1493\",\n outline=\"#FF1493\",\n )\n draw.text((left + 6, bottom + 5), name, fill=\"white\")\n\n del draw # conserve resources\n image_pil.show()", "def extract_face_detections(self):\n self.detector.setInput(self.image_blob)\n self.detections = self.detector.forward()", "def is_ball(self, blob, im):\n size=self.cam.info.get_pixel_size(blob[0], im)*len(blob)\n return size>15 and size<45", "def process_frame(img, frame, boxes):\n faces = boxes\n # Loop through all the faces detected and determine whether or not they are in the database\n identities = []\n\n for (y, x, h, w) in faces: #add padding\n x1 = int(x-PADDING)\n y1 = int(y-PADDING)\n x2 = int(w+PADDING)\n y2 = int(h+PADDING)\n\n identity = find_identity(frame, x1, y1, x2, y2)\n\n if identity is not None:\n identities.append(identity)\n\n if identities != []:\n welcome_users(identities)\n\n #set flag to not recognize until face detected again\n ready_to_recognize_file = open(\"varThread\\\\ready_recognize.txt\", \"r+\")\n ready_to_recognize_file.seek(0,0)\n ready_to_recognize_file.write('0')\n ready_to_recognize_file.close()", "def 
detect_faces_in_video(self):\r\n logger.debug('Executing face detection')\r\n\r\n use_eyes_position = c.USE_EYES_POSITION\r\n\r\n if ((self.params is not None) and\r\n (c.USE_EYES_POSITION_KEY in self.params)):\r\n use_eyes_position = self.params[c.USE_EYES_POSITION_KEY]\r\n\r\n det_loaded = False\r\n\r\n # Try to load YAML file with detection results\r\n if os.path.exists(self.det_file_path):\r\n\r\n print 'Loading YAML file with detection results'\r\n logger.debug('Loading YAML file with detection results')\r\n\r\n det_faces = utils.load_YAML_file(self.det_file_path)\r\n\r\n if det_faces:\r\n self.detected_faces = det_faces\r\n\r\n print 'YAML file with detection results loaded'\r\n logger.debug('YAML file with detection results loaded')\r\n\r\n det_loaded = True\r\n\r\n if not det_loaded:\r\n\r\n # Check existence of frame list\r\n if len(self.frame_list) == 0:\r\n\r\n # Try to load YAML file with frame list\r\n if os.path.exists(self.frames_file_path):\r\n\r\n print 'Loading YAML file with frame list'\r\n logger.debug('Loading YAML file with frame list')\r\n\r\n f_list = utils.load_YAML_file(self.frames_file_path)\r\n\r\n if f_list:\r\n\r\n self.frame_list = f_list\r\n\r\n print 'YAML file with frame list loaded'\r\n logger.debug('YAML file with frame list loaded')\r\n\r\n else:\r\n\r\n print 'Warning! Error in loading file!'\r\n logger.warning('Error in loading file!')\r\n\r\n else:\r\n\r\n print 'Warning! No frame list found!'\r\n logger.warning('No frame list found!')\r\n\r\n return\r\n\r\n print '\\n\\n### Face detection ###\\n'\r\n logger.debug('\\n\\n### Face detection ###\\n')\r\n\r\n # Save processing time\r\n start_time = cv2.getTickCount()\r\n\r\n if not (os.path.exists(self.det_path)):\r\n # Create directory for this video\r\n\r\n os.makedirs(self.det_path)\r\n\r\n if not (os.path.exists(self.align_path)):\r\n # Create directory with aligned faces\r\n\r\n os.makedirs(self.align_path)\r\n\r\n frame_counter = 0\r\n self.detected_faces = []\r\n\r\n detection_results = []\r\n\r\n # Build list of frame names, frame paths and elapsed time\r\n frame_name_list = []\r\n\r\n frame_path_list = []\r\n\r\n elapsed_s_list = []\r\n\r\n for frame_dict in self.frame_list:\r\n frame_name = frame_dict[c.SAVED_FRAME_NAME_KEY]\r\n\r\n frame_name_list.append(frame_name)\r\n\r\n frame_path = os.path.join(self.frames_path, frame_name)\r\n\r\n frame_path_list.append(frame_path)\r\n\r\n elapsed_s = frame_dict[c.ELAPSED_VIDEO_TIME_KEY]\r\n\r\n elapsed_s_list.append(elapsed_s)\r\n\r\n # Iterate through frame paths in list\r\n for frame_path in frame_path_list:\r\n self.progress = 100 * (frame_counter / self.saved_frames)\r\n\r\n print('progress: ' + str(self.progress) + ' % \\r'),\r\n\r\n detection_result = fd.detect_faces_in_image(\r\n frame_path, self.align_path, self.params, False)\r\n\r\n detection_results.append(detection_result)\r\n\r\n frame_counter += 1\r\n\r\n frame_counter = 0\r\n\r\n # Iterate through detection results\r\n for detection_result in detection_results:\r\n\r\n detection_error = detection_result[c.ERROR_KEY]\r\n\r\n detection_dict = {\r\n c.SAVED_FRAME_NAME_KEY: frame_name_list[frame_counter],\r\n c.FRAME_COUNTER_KEY: frame_counter}\r\n\r\n elapsed_s = elapsed_s_list[frame_counter]\r\n\r\n detection_dict[c.ELAPSED_VIDEO_TIME_KEY] = elapsed_s\r\n\r\n faces = []\r\n if not detection_error:\r\n\r\n det_faces = detection_result[c.FACES_KEY]\r\n\r\n for det_face in det_faces:\r\n\r\n face_dict = {c.BBOX_KEY: det_face[c.BBOX_KEY]}\r\n\r\n if use_eyes_position:\r\n 
face_dict[c.LEFT_EYE_POS_KEY] = (\r\n det_face[c.LEFT_EYE_POS_KEY])\r\n\r\n face_dict[c.RIGHT_EYE_POS_KEY] = (\r\n det_face[c.RIGHT_EYE_POS_KEY])\r\n\r\n face_dict[c.NOSE_POSITION_KEY] = (\r\n det_face[c.NOSE_POSITION_KEY])\r\n\r\n face_dict[c.ALIGNED_FACE_FILE_NAME_KEY] = (\r\n det_face[c.ALIGNED_FACE_FILE_NAME_KEY])\r\n\r\n faces.append(face_dict)\r\n\r\n detection_dict[c.FACES_KEY] = faces\r\n\r\n self.detected_faces.append(detection_dict)\r\n\r\n frame_counter += 1\r\n\r\n # Save detection results in YAML file\r\n\r\n utils.save_YAML_file(self.det_file_path, self.detected_faces)\r\n\r\n time_in_clocks = cv2.getTickCount() - start_time\r\n time_in_seconds = time_in_clocks / cv2.getTickFrequency()\r\n\r\n print 'Time for face detection: ', time_in_seconds, 's\\n'\r\n logger.debug('Time for face detection: ', time_in_seconds, 's\\n')\r\n\r\n self.anal_times[c.FACE_DETECTION_TIME_KEY] = time_in_seconds\r\n\r\n utils.save_YAML_file(self.analysis_file_path, self.anal_times)", "def detect(frame: numpy.ndarray) -> bool:\n color = frame[:20, 1100:1150].mean(axis=(0, 1))\n return numpy.linalg.norm(color - BG_COLOR) < 5", "def _box_faces(image):\n for face in image.faces:\n _box_face(image, face)\n return image", "def num_faces(self):\n return self._top_exp.number_of_faces()", "def faces(self):\r\n return self._faces", "def images_exist(self):\n pass", "def is_fit(self):\n if not hasattr(self, '_icc_imgs'):\n return False\n else:\n return self._icc_imgs is not None", "def face_detect(sess, net, image_name):\n\n\t# Load the demo image\n\tim_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\n\tim = cv2.imread(im_file)\n\n\t# Detect all object classes and regress object bounds\n\ttimer = Timer()\n\ttimer.tic()\n\t# scores, boxes = im_detect(sess, net, im)\n\tscores, boxes, eyes, smiles = im_detect_ori(sess, net, im)\n\ttimer.toc()\n\tprint ('Detection took {:.3f}s for '\n\t\t\t'{:d} object proposals').format(timer.total_time, boxes.shape[0])\n\n\t# Visualize detections for each class\n\t# im = im[:, :, (2, 1, 0)]\n\t# fig, ax = plt.subplots(figsize=(8, 8))\n\t# ax.imshow(im, aspect='equal')\n\n\tCONF_THRESH = 0.9\n\tNMS_THRESH = 0.3\n\tfor cls_ind, cls in enumerate(CLASSES[20:]):\n\t\tcls_ind += 20 # because we skipped everything except face\n\t\tcls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n\t\tcls_scores = scores[:, cls_ind]\n\t\tdets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)\n\t\tkeep = nms(dets, NMS_THRESH)\n\t\tdets = dets[keep, :]\n\t\teye = eyes[keep, :]\n\t\tsmile= smiles[keep, :]\n\n\tinds = np.where(dets[:, -1] >= CONF_THRESH)[0]\n\tface_num = len(inds)\n\tprint '{} faces detected!'.format(face_num)\n\tdets = dets[inds, :]\n\teye = eye[inds, 1]\n\tsmile = smile[inds, 1]\n\n\treturn dets, eye, smile", "def empty(self):\n return len(self.layers) == 0", "def test_face_in_face(self):\n w = mt.createCube(marker=1, boundaryMarker=1)\n b = w.boundary(2)\n\n pad = mt.createFacet(mt.createCircle(radius=0.2, segments=12,\n isHole=True))\n b2 = pad.boundary(0)\n\n # rotate to match target norm and pos\n rot = pg.core.getRotation(b2.norm(), b.norm())\n pad.transform(rot)\n pad.translate(b.center())\n\n # create a boundary with new marker match the hole\n w.copyBoundary(b2)\n\n w.createBoundary(w.nodes([w.createNode(n.pos()).id() for n in b2.nodes()]),\n marker=2)\n\n #print(w.boundaryMarkers())\n\n mesh = mt.createMesh(w)\n\n #pg.show(mesh)\n # w.exportPLC('pad.poly')\n # mesh.exportBoundaryVTU('b.vtu')\n 
np.testing.assert_array_equal(pg.unique(pg.sort(mesh.boundaryMarkers())),\n [0, 1, 2])\n\n # print(mesh)\n # mesh.exportBoundaryVTU('b.vtu')\n pg.show(mesh)", "def detectFace_eye(img):\n net_24.blobs['X'].reshape(1, 3, 24, 24)\n img = cv2.resize(img, (24, 24))\n img = (img - 127.5) / 127.5\n\n img = img.transpose((2, 0, 1))\n net_24.blobs['X'].data[...] = img\n out = net_24.forward()\n cls_prob = out['prob']\n return cls_prob[0]", "def check_image_local(self, tag):\n tags = self.get_tags()\n return (tag in tags)", "def has_efg_tensors(self) -> bool:\n return self._efg_tensors is not None", "def is_isotropic(self):\n return self.fibres is None", "def test_empty_img():\n assert detected_boxes[-1] == ground_truth_boxes[-1]", "def faceRecognition(image):\r\n faceLandmarks = [[],[],[]]\r\n face_landmarks_list = face_recognition.face_landmarks(image)\r\n if len(face_landmarks_list)>0:\r\n if len(face_landmarks_list[0]['left_eye'])>0:\r\n leftEyePos = [tuple(map(lambda i: int(i/32),i)) for i in face_landmarks_list[0]['left_eye']]\r\n for i in set(leftEyePos):\r\n if leftEyePos.count(i)>=len(leftEyePos)//len(set(leftEyePos)):\r\n faceLandmarks[0] += [i,]\r\n if len(face_landmarks_list[0]['right_eye'])>0:\r\n rightEyePos = [tuple(map(lambda i: int(i/32),i)) for i in face_landmarks_list[0]['right_eye']]\r\n for i in set(rightEyePos):\r\n if rightEyePos.count(i)>=len(rightEyePos)//len(set(rightEyePos)):\r\n faceLandmarks[1] += [i,]\r\n if len(face_landmarks_list[0]['top_lip'])>0:\r\n mouthPos = [tuple(map(lambda i: int(i/32),i)) for i in (face_landmarks_list[0]['top_lip']+face_landmarks_list[0]['bottom_lip'])]\r\n for i in set(mouthPos):\r\n if mouthPos.count(i)>=len(mouthPos)//len(set(mouthPos)):\r\n faceLandmarks[2] += [i,]\r\n return faceLandmarks", "def face_detector(img):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # faceCascade imports in the previously made classifier\n faceCascade = cv2.CascadeClassifier('src/face_detection/haarcascade_frontalface_default.xml')\n faces = faceCascade.detectMultiScale(\n gray, \n scaleFactor=1.2,\n minNeighbors=1, \n minSize=(100, 100)\n )\n\n return faces", "def isFim(self):\r\n return", "def detect_face(self):\n ret, frame = self.cap.read()\n if ret:\n error_x, error_y = self.detect_and_display(frame)\n self.servo_controller(error_x, error_y)", "def track_faces_in_video(self):\r\n\r\n logger.debug('Executing face tracking')\r\n\r\n track_loaded = False\r\n\r\n # Try to load YAML file with tracking results\r\n if os.path.exists(self.track_file_path):\r\n\r\n print 'Loading YAML file with tracking results'\r\n logger.debug('Loading YAML file with tracking results')\r\n\r\n track_faces = utils.load_YAML_file(self.track_file_path)\r\n\r\n if track_faces:\r\n self.tracked_faces = track_faces\r\n\r\n print 'YAML file with tracking results loaded'\r\n logger.debug('YAML file with tracking results loaded')\r\n\r\n track_loaded = True\r\n\r\n if not track_loaded:\r\n\r\n # Check existence of detection results\r\n\r\n if len(self.detected_faces) == 0:\r\n\r\n # Try to load YAML file\r\n if os.path.exists(self.det_file_path):\r\n\r\n print 'Loading YAML file with detection results'\r\n logger.debug('Loading YAML file with detection results')\r\n\r\n with open(self.det_file_path) as f:\r\n\r\n self.detected_faces = yaml.load(f)\r\n\r\n print 'YAML file with detection results loaded'\r\n logger.debug('YAML file with detection results loaded')\r\n\r\n else:\r\n\r\n print 'Warning! 
No detection results found!'\r\n logger.warning('No detection results found!')\r\n\r\n return\r\n\r\n # Get shot cuts\r\n self.calc_hist_diff()\r\n\r\n print '\\n\\n### Face tracking ###\\n'\r\n logger.debug('\\n\\n### Face tracking ###\\n')\r\n\r\n # Save processing time\r\n start_time = cv2.getTickCount()\r\n\r\n self.tracked_faces = []\r\n\r\n self.disc_tracked_faces = []\r\n\r\n # Counter for frames with detected faces\r\n frame_counter = 0\r\n\r\n # If a reduced frame rate is used, frames are less\r\n use_or_fps = c.USE_ORIGINAL_FPS\r\n used_fps = c.USED_FPS\r\n min_segment_duration = c.MIN_SEGMENT_DURATION\r\n tracking_min_int_area = c.TRACKING_MIN_INT_AREA\r\n min_size_width = c.FACE_DETECTION_MIN_SIZE_WIDTH\r\n min_size_height = c.FACE_DETECTION_MIN_SIZE_HEIGHT\r\n max_fr_with_miss_det = c.MAX_FR_WITH_MISSED_DET\r\n use_aligned_face = c.USE_ALIGNED_FACE_IN_TRACKING\r\n\r\n if self.params is not None:\r\n if c.USE_ORIGINAL_FPS_KEY in self.params:\r\n use_or_fps = self.params[c.USE_ORIGINAL_FPS_KEY]\r\n if c.USED_FPS_KEY in self.params:\r\n used_fps = self.params[c.USED_FPS_KEY]\r\n if c.MIN_SEGMENT_DURATION_KEY in self.params:\r\n min_segment_duration = self.params[\r\n c.MIN_SEGMENT_DURATION_KEY]\r\n if c.TRACKING_MIN_INT_AREA_KEY in self.params:\r\n tracking_min_int_area = self.params[\r\n c.TRACKING_MIN_INT_AREA_KEY]\r\n if c.MIN_SIZE_WIDTH_KEY in self.params:\r\n min_size_width = self.params[c.MIN_SIZE_WIDTH_KEY]\r\n if c.MIN_SIZE_HEIGHT_KEY in self.params:\r\n min_size_height = self.params[c.MIN_SIZE_HEIGHT_KEY]\r\n if c.MAX_FR_WITH_MISSED_DET_KEY in self.params:\r\n max_fr_with_miss_det = self.params[\r\n c.MAX_FR_WITH_MISSED_DET_KEY]\r\n if c.USE_ALIGNED_FACE_IN_TRACKING_KEY in self.params:\r\n use_aligned_face = self.params[\r\n c.USE_ALIGNED_FACE_IN_TRACKING_KEY]\r\n\r\n # Minimum duration of a segment in frames\r\n min_segment_frames = int(\r\n math.ceil(self.fps * min_segment_duration))\r\n\r\n if not use_or_fps:\r\n min_segment_frames = int(\r\n math.ceil((used_fps + 1) * min_segment_duration))\r\n\r\n # Make copy of detected faces\r\n detection_list = list(self.detected_faces)\r\n\r\n # Iterate through frames in detected_faces\r\n for detection_dict in detection_list:\r\n\r\n self.progress = 100 * (frame_counter / self.saved_frames)\r\n\r\n print('progress: ' + str(self.progress) + ' % \\r'),\r\n\r\n elapsed_s = detection_dict[c.ELAPSED_VIDEO_TIME_KEY]\r\n\r\n frame_name = detection_dict[c.SAVED_FRAME_NAME_KEY]\r\n\r\n faces = detection_dict[c.FACES_KEY]\r\n\r\n face_counter = 0\r\n\r\n # Iterate though faces in frame\r\n for face_dict in faces:\r\n\r\n track_window = face_dict[c.BBOX_KEY]\r\n\r\n left_eye_pos = face_dict[c.LEFT_EYE_POS_KEY]\r\n\r\n right_eye_pos = face_dict[c.RIGHT_EYE_POS_KEY]\r\n\r\n nose_pos = face_dict[c.NOSE_POSITION_KEY]\r\n\r\n file_name = face_dict[c.ALIGNED_FACE_FILE_NAME_KEY]\r\n\r\n # Counter for faces in segment\r\n segment_face_counter = 1\r\n\r\n segment_frame_list = []\r\n\r\n # Start new segment\r\n segment_frame_dict = {c.FRAME_COUNTER_KEY: frame_counter,\r\n c.ELAPSED_VIDEO_TIME_KEY: elapsed_s,\r\n c.DETECTION_BBOX_KEY: track_window,\r\n c.TRACKING_BBOX_KEY: track_window,\r\n c.LEFT_EYE_POS_KEY: left_eye_pos,\r\n c.RIGHT_EYE_POS_KEY: right_eye_pos,\r\n c.NOSE_POSITION_KEY: nose_pos,\r\n c.ALIGNED_FACE_FILE_NAME_KEY: file_name,\r\n c.DETECTED_KEY: True,\r\n c.SAVED_FRAME_NAME_KEY: frame_name}\r\n\r\n segment_frame_list.append(segment_frame_dict)\r\n\r\n aligned_file_path = None\r\n rgb_roi = None\r\n if use_aligned_face:\r\n # Use the 
aligned face as the\r\n # Region of Interest for tracking\r\n complete_file_name = file_name + '.png'\r\n aligned_file_path = os.path.join(\r\n self.align_path, complete_file_name)\r\n\r\n rgb_roi = cv2.imread(\r\n aligned_file_path, cv2.IMREAD_COLOR)\r\n\r\n else:\r\n # Use detected face as the\r\n # Region of Interest for tracking\r\n x0 = track_window[0]\r\n y0 = track_window[1]\r\n w = track_window[2]\r\n h = track_window[3]\r\n x1 = x0 + w\r\n y1 = y0 + h\r\n\r\n frame_path = os.path.join(\r\n self.frames_path, frame_name)\r\n\r\n # Whole frame\r\n rgb = cv2.imread(frame_path, cv2.IMREAD_COLOR)\r\n\r\n # Face\r\n rgb_roi = rgb[y0:y1, x0:x1]\r\n\r\n if rgb_roi is None:\r\n print('Warning! Face to be tracked is None')\r\n\r\n if use_aligned_face:\r\n logger.warning(\r\n 'Face ' + aligned_file_path + ' is None')\r\n else:\r\n logger.warning(\r\n 'Face from frame ' + frame_name + ' is None')\r\n\r\n face_counter += 1\r\n\r\n continue\r\n\r\n # Convert image to hsv\r\n hsv_roi = cv2.cvtColor(rgb_roi, cv2.COLOR_BGR2HSV)\r\n\r\n mask_roi = cv2.inRange(\r\n hsv_roi, np.array((0., 60., 32.)),\r\n np.array((180., 255., 255.)))\r\n\r\n hist = cv2.calcHist(\r\n [hsv_roi], [0], mask_roi, [16], [0, 180])\r\n\r\n cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)\r\n hist = hist.reshape(-1)\r\n\r\n # Face should not be considered anymore\r\n del (detection_list[frame_counter]\r\n [c.FACES_KEY][face_counter])\r\n\r\n sub_frame_counter = frame_counter + 1\r\n\r\n missed_det_counter = 0\r\n\r\n # Iterate through subsequent frames\r\n for sub_det_dict in detection_list[sub_frame_counter:]:\r\n\r\n # Check if a new shot begins\r\n if sub_frame_counter in self.cut_idxs:\r\n break\r\n\r\n sub_frame_name = sub_det_dict[c.SAVED_FRAME_NAME_KEY]\r\n\r\n sub_frame_path = os.path.join(\r\n self.frames_path, sub_frame_name)\r\n\r\n # Read image from given path\r\n sub_image = cv2.imread(\r\n sub_frame_path, cv2.IMREAD_COLOR)\r\n\r\n if sub_image is None:\r\n print('Warning! 
Image is None')\r\n logger.warning(\r\n 'Image ' + sub_frame_path + ' is None')\r\n\r\n continue\r\n\r\n # Convert image to hsv\r\n sub_hsv = cv2.cvtColor(sub_image, cv2.COLOR_BGR2HSV)\r\n\r\n sub_mask = cv2.inRange(sub_hsv,\r\n np.array((0., 60., 32.)),\r\n np.array((180., 255., 255.)))\r\n\r\n # Apply meanshift to get the new location\r\n prob = cv2.calcBackProject(\r\n [sub_hsv], [0], hist, [0, 180], 1)\r\n prob &= sub_mask\r\n term_crit = (cv2.TERM_CRITERIA_EPS\r\n | cv2.TERM_CRITERIA_COUNT, 10, 1)\r\n\r\n track_box, track_window = cv2.CamShift(\r\n prob, track_window, term_crit)\r\n\r\n track_x0 = track_window[0]\r\n track_y0 = track_window[1]\r\n track_w = track_window[2]\r\n track_h = track_window[3]\r\n\r\n # Check size of track window\r\n if ((track_w <= min_size_width)\r\n or (track_h <= min_size_height)):\r\n\r\n break\r\n\r\n segment_frame_dict = {}\r\n\r\n track_list = (\r\n int(track_x0), int(track_y0), int(track_w),\r\n int(track_h))\r\n\r\n segment_frame_dict[c.TRACKING_BBOX_KEY] = track_list\r\n\r\n sub_faces = sub_det_dict[c.FACES_KEY]\r\n\r\n sub_face_counter = 0\r\n\r\n sim = False\r\n\r\n det_bbox = None\r\n\r\n for sub_face_dict in sub_faces:\r\n\r\n det_bbox = sub_face_dict[c.BBOX_KEY]\r\n\r\n # If track window corresponds to\r\n # a detected face,\r\n # delete detection from list\r\n\r\n (sim, int_area, int_area_pct) = utils.is_rect_similar(\r\n track_window, det_bbox, tracking_min_int_area)\r\n\r\n if sim:\r\n # det_face_counter = det_face_counter + 1\r\n\r\n track_window = det_bbox\r\n\r\n break\r\n\r\n sub_face_counter += 1\r\n\r\n t_x0 = track_window[0]\r\n t_y0 = track_window[1]\r\n t_w = track_window[2]\r\n t_h = track_window[3]\r\n\r\n segment_frame_dict[c.DETECTION_BBOX_KEY] = det_bbox\r\n\r\n # If a detected face corresponds to track window\r\n # delete detected face from detection list\r\n\r\n if sim:\r\n\r\n missed_det_counter = 0\r\n\r\n segment_frame_dict[c.DETECTED_KEY] = True\r\n\r\n segment_frame_dict[c.LEFT_EYE_POS_KEY] = (\r\n sub_face_dict[c.LEFT_EYE_POS_KEY])\r\n segment_frame_dict[c.RIGHT_EYE_POS_KEY] = (\r\n sub_face_dict[c.RIGHT_EYE_POS_KEY])\r\n\r\n segment_frame_dict[c.NOSE_POSITION_KEY] = (\r\n sub_face_dict[c.NOSE_POSITION_KEY])\r\n\r\n segment_frame_dict[c.ALIGNED_FACE_FILE_NAME_KEY] = (\r\n sub_face_dict[c.ALIGNED_FACE_FILE_NAME_KEY])\r\n\r\n del (detection_list[sub_frame_counter]\r\n [c.FACES_KEY][sub_face_counter])\r\n\r\n else:\r\n\r\n # Check if distance from last detection\r\n # is too big\r\n missed_det_counter += 1\r\n\r\n if missed_det_counter > max_fr_with_miss_det:\r\n\r\n # Remove last frames and\r\n # interrupt tracking\r\n for i in range(0, max_fr_with_miss_det):\r\n segment_frame_list.pop()\r\n\r\n segment_face_counter = (\r\n segment_face_counter - max_fr_with_miss_det)\r\n\r\n break\r\n\r\n segment_frame_dict[c.DETECTED_KEY] = False\r\n\r\n elapsed_ms = sub_det_dict[c.ELAPSED_VIDEO_TIME_KEY]\r\n\r\n # Update list of frames for segment\r\n segment_frame_dict[\r\n c.FRAME_COUNTER_KEY] = sub_frame_counter\r\n segment_frame_dict[\r\n c.ELAPSED_VIDEO_TIME_KEY] = elapsed_ms\r\n\r\n track_list = (\r\n int(t_x0), int(t_y0), int(t_w), int(t_h))\r\n\r\n segment_frame_dict[c.TRACKING_BBOX_KEY] = track_list\r\n segment_frame_dict[\r\n c.SAVED_FRAME_NAME_KEY] = sub_frame_name\r\n\r\n segment_frame_list.append(segment_frame_dict)\r\n\r\n del sub_image\r\n\r\n sub_frame_counter += 1\r\n\r\n segment_face_counter += 1\r\n\r\n # Segment must be considered only if its number\r\n # of frames is greater or equals than a minimum\r\n if 
segment_face_counter >= min_segment_frames:\r\n\r\n segments = self.divide_segment_by_face(\r\n segment_frame_list)\r\n\r\n if len(segments) > 0:\r\n self.tracked_faces.extend(segments)\r\n\r\n else:\r\n\r\n segment_dict = {c.FRAMES_KEY: segment_frame_list}\r\n\r\n self.disc_tracked_faces.append(segment_dict)\r\n\r\n # Check histograms of detected faces and\r\n # divide segment accordingly\r\n\r\n face_counter += 1\r\n\r\n frame_counter += 1\r\n\r\n # Create directory for this video\r\n\r\n if not (os.path.exists(self.track_path)):\r\n os.makedirs(self.track_path)\r\n\r\n # Save tracking result in YAML file\r\n utils.save_YAML_file(self.track_file_path, self.tracked_faces)\r\n\r\n # Save processing time\r\n time_in_clocks = cv2.getTickCount() - start_time\r\n time_in_seconds = time_in_clocks / cv2.getTickFrequency()\r\n\r\n print 'Time for face tracking:', time_in_seconds, 's\\n'\r\n logger.debug('Time for face tracking:', time_in_seconds, 's\\n')\r\n\r\n self.anal_times[c.FACE_TRACKING_TIME_KEY] = time_in_seconds\r\n\r\n utils.save_YAML_file(self.analysis_file_path, self.anal_times)", "def is_trained(self) -> bool:", "def get_faces(image):\n return (image.crop(face) for face in image.faces)", "def __do_memebers_exist__(self):\n assert self.element_type is not None\n assert self.elements is not None\n assert self.points is not None\n if self.element_type == \"tri\" or self.element_type == \"quad\":\n assert self.edges is not None\n ndim = self.InferSpatialDimension()\n if self.element_type == \"tet\" or self.element_type == \"hex\":\n assert self.faces is not None", "def detection():\n faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + \"haarcascade_frontalface_default.xml\")\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.3,\n minNeighbors=3,\n minSize=(30, 30)\n )\t#Haar-cascade: A Face detection algorithm\n\n area = faces[:,2] * faces[:,3]\n faces = np.c_[faces,area]\t#concatenates area values to last column of 'face' array.\n\n print('All detected faces\\n',faces)\n i,j = unravel_index(faces.argmax(), faces.shape)\t# gets the position of maximum value from 'face' array.\n print(i,j)\n print(\"Found %d Face%s!\" %(len(faces),\"s\"[len(faces)==1:]))\n\n X = faces[i,0]\n Y = faces[i,1]\n W = faces[i,2]\n H = faces[i,3]\n \n cv2.rectangle(image, (X, Y), (X + W, Y + H), (0, 255, 0), 2)\n roi_color = image[Y:Y + H, X:X + W] \n print(\"Face(largest) Extracted.\")\n cv2.imwrite('Extracted_face.jpg', roi_color)\t#Image Extraction.\n status = cv2.imwrite('Output.jpg', image)\n print(\"Image Output.jpg written to filesystem: \", status)", "def check(self, grain=50):\r\n opengles.glDisable(GL_SCISSOR_TEST)\r\n self.s_flg = False\r\n opengles.glReadPixels(0, self.y0, self.ix, 1,\r\n GL_RGB, GL_UNSIGNED_BYTE,\r\n ctypes.byref(self.img))\r\n r0 = self.img[0:3]\r\n for i in xrange(0, self.img_sz, self.step):\r\n if self.img[i:(i+3)] != r0:\r\n return True\r\n\r\n return False", "def detect_faces(path):\n from google.cloud import vision\n from PIL import Image, ImageDraw\n import io\n\n client = vision.ImageAnnotatorClient()\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.Image(content=content)\n response = client.face_detection(image=image)\n faces = response.face_annotations\n face_distance = [10000000] * len(faces)\n face_area = []\n face_vertices = []\n\n counter = 0\n for face in faces:\n face_vertices.append((face.bounding_poly.vertices[0].x, face.bounding_poly.vertices[0].y))\n 
face_area.append(area((face.bounding_poly.vertices[0].x, face.bounding_poly.vertices[0].y), \n (face.bounding_poly.vertices[1].x, face.bounding_poly.vertices[1].y),\n (face.bounding_poly.vertices[2].x, face.bounding_poly.vertices[2].y)))\n im = Image.open(path)\n cropped = im.crop((face.bounding_poly.vertices[0].x, face.bounding_poly.vertices[0].y, face.bounding_poly.vertices[2].x, face.bounding_poly.vertices[2].y))\n #cropped.show()\n cropped.save(\"./media/images/\" + str(counter) + \".jpg\")\n counter += 1\n \n for i in range(len(faces)):\n min_dist = 0\n for j in range(len(faces)):\n distance = dist(face_vertices[i], face_vertices[j])\n if distance > 0 and (face_area[i] + face_area[j]) / distance < face_distance[i]: \n face_distance[i] = (face_area[i] + face_area[j]) / distance\n \n \n with Image.open(path) as im:\n counter = 0\n \n draw = ImageDraw.Draw(im)\n for face in faces:\n draw.rectangle([face.bounding_poly.vertices[counter].x, face.bounding_poly.vertices[counter].y,\n face.bounding_poly.vertices[counter + 2].x, face.bounding_poly.vertices[counter + 2].y], None, \"#0000ff\", 3)\n for i in range(len(faces)):\n if face_distance[i] < 30 or len(faces) == 1: colour = \"#00ff00\"\n else: colour = \"#ff0000\"\n draw.rectangle([faces[i].bounding_poly.vertices[0].x, faces[i].bounding_poly.vertices[0].y,\n faces[i].bounding_poly.vertices[2].x, faces[i].bounding_poly.vertices[2].y], None, colour, 3)\n draw.text((faces[i].bounding_poly.vertices[0].x - 10, faces[i].bounding_poly.vertices[0].y - 10), str(i+1), \"#ff0000\",font=None, anchor=None, spacing=4, align='left', direction=None, features=None, language=None, stroke_width=1, stroke_fill=None, embedded_color=False)\n\n im.save(\"./media/images/upload.jpg\")\n return len(faces)\n if response.error.message:\n raise Exception('Error')", "def face_detection(frame):\n if frame is None :\n return 0,0,0,0\n \n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(30, 30),\n flags=cv2.CASCADE_SCALE_IMAGE\n )\n\n # Draw a rectangle around the faces\n position_x, position_y ,width,height = 0, 0, 0, 0\n for x, y, w, h in faces:\n position_x, position_y ,width,height = x, y, w, h\n\n return position_x, position_y,width,height", "def get_faces():\n detected_faces = request()\n\n if not detected_faces:\n raise FaceNotDetectedError()\n return detected_faces", "def game_active():\n im = region_grabber((0, 0, 110, 30))\n pos = imagesearcharea(\"Images/title.jpg\", 0, 0, 0, 0, 0.9, im) # Black background\n return pos != [-1, -1]", "def face(self):\n\n return self.faceup", "def detect_face(self, img, img_file_path=None):\n #use dlib face detector\n #create dlib detector, this is hog with svm\n detector = dlib.get_frontal_face_detector()\n #win = dlib.image_window()\n if img_file_path:\n img = dlib.load_rgb_image(img_file_path)\n #detect number of faces in an image\n dets = detector(img)\n list_face_coord = [] # this will store left, top, right, bottom\n for i, d in enumerate(dets):\n list_face_coord.append((d.left(), d.top(), d.right(), d.bottom()))\n return list_face_coord", "def _recognize_face(unknown_encoding, loaded_encodings):\n boolean_matches = face_recognition.compare_faces(\n loaded_encodings[\"encodings\"], unknown_encoding\n )\n votes = Counter(\n name\n for match, name in zip(boolean_matches, loaded_encodings[\"names\"])\n if match\n )\n if votes:\n return votes.most_common(1)[0][0]", "def face_detection(img, faceCascade=faceCascade):\n\tgray = 
cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\tfaces = faceCascade.detectMultiScale(\n\t\tgray,\n\t\tscaleFactor=1.2,\n\t\tminNeighbors=5,\n\t\tminSize=(32, 32))\n\n\t# If no face detected\n\tif len(faces) == 0:\n\t\tw = min(img.shape[0], img.shape[1])\n\t\treturn img[(img.shape[0]-w)//2:(img.shape[0]+w)//2, (img.shape[1]-w)//2:(img.shape[1]+w)//2, :]\n\n\t# If faces detected, choose the face with the max size\n\tmax_h, index = 0, 0\n\tfor i, (x, y, w, h) in enumerate(faces):\n\t\tif max_h < h:\n\t\t\tmax_h, index = h, i\n\n\t(x, y, w, h) = faces[index]\n\n\tif img.shape[0]>img.shape[1]:\n\t\tif x + w/2 < img.shape[0]/2:\n\t\t\treturn img[:img.shape[1],:,:]\n\n\t\telse:\n\t\t\treturn img[-img.shape[1]:,:,:]\n\n\telse:\n\t\tif y + h/2 < img.shape[1]/2:\n\t\t\treturn img[:,:img.shape[0],:]\n\n\t\telse:\n\t\t\treturn img[:,-img.shape[0]:,:]", "def has_data(self):\n if len(self.channels) > 0:\n return True\n return False", "def get_face_data(self, image_file, flag):\n image = 0\n if flag == self.FILE_READ:\n image = cv2.imread(image_file)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n elif flag == self.NETWORK_BYTE_STREAM:\n image = cv2.imdecode(\n numpy.fromstring(image_file.read(), numpy.uint8), cv2.IMREAD_UNCHANGED\n )\n elif flag == self.IMAGE_DATA or flag is None:\n image = image_file\n landmarks = self.__get_landmarks(image)\n if landmarks[0] is None or landmarks[1] is None:\n return None\n return landmarks", "def has_legacy_image(self):\n pass", "def has_legacy_image(self):\n pass", "def update(self,image):\r\n \r\n self._faces=[]\r\n \r\n if util.isgray(image):\r\n image=cv2.equalizeHist(image)\r\n \r\n else:\r\n \r\n image=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\r\n cv2.equalizeHist(image,image)\r\n \r\n minsize=util.widthheightdividedby(image,8)\r\n\r\n \r\n\r\n \r\n facerect=self._faceclassifier.detectMultiScale(image,\r\n self.scalefactor,\r\n self.minneighbors,\r\n self.flags,\r\n minsize)\r\n \r\n \"\"\"if facerects is not None:\r\n \r\n for facerect in facerects:\r\n face=face()\r\n \r\n face.facerect=facerect\r\n \r\n \r\n x,y,w,h=facerect\r\n \r\n # Seek an eye in the upper-left part of the face. \r\n searchRect = (x+w/7, y, w*2/7, h/2) \r\n face.leftEyeRect = self._detectOneObject( \r\n self._eyeClassifier, image, searchRect, 64) \r\n \r\n \r\n \r\n # Seek an eye in the upper-right part of the face. \r\n searchRect = (x+w*4/7, y, w*2/7, h/2) \r\n face.rightEyeRect = self._detectOneObject( \r\n self._eyeClassifier, image, searchRect, 64) \r\n \r\n \r\n \r\n # Seek a nose in the middle part of the face. \r\n searchRect = (x+w/4, y+h/4, w/2, h/2) \r\n face.noseRect = self._detectOneObject( \r\n self._noseClassifier, image, searchRect, 32) \r\n \r\n # Seek a mouth in the lower-middle part of the face. 
\r\n searchRect = (x+w/6, y+h*2/3, w*2/3, h/3) \r\n face.mouthRect = self._detectOneObject( \r\n self._mouthClassifier, image, searchRect, 16) \r\n \r\n \r\n \r\n self._faces.append(face)\r\n\r\n \r\n \r\n def _detectoneobject(self,\r\n classifier,\r\n image,\r\n rect,\r\n imagesizetominsizeratio):\r\n \r\n x ,y ,w ,h=rect\r\n \r\n minsize=util.widthheightdividedby(image,\r\n imagesizetominsizeratio)\r\n \r\n subimage=image[y:y+h,x:x+w]\r\n \r\n subrect=classifier.dectectMultiScale(subimage,\r\n self.scalefactor,\r\n self.minneighbors,\r\n self.flags,\r\n minsize)\r\n \r\n if len(subrect)==0:\r\n return None\r\n \r\n subx,suby,subw,subh=subrects[0]\r\n \r\n return (x+subx,y+suby,w+subw,h+subh)\r\n \r\n \"\"\"", "def check(self):\n basic_recognized = 1\n # Scan the array, in order to check if the primitives are recognized correctly\n for frame in self.data_array:\n hmm_name = frame.best_log_probability[0]\n\n if str(basic_recognized) in hmm_name or str(basic_recognized+1) in hmm_name:\n if str(basic_recognized+1) in hmm_name:\n basic_recognized+=1\n else:\n return False\n # Has been recognized the complete gesture? If yes return true else false\n if basic_recognized == self.n_primitives+1:\n return True", "def getNakedFaceIDs(mesh):\n \n nakedFaces = []\n \n # Get naked vertices\n nPts = list( mesh.GetNakedEdgePointStatus())\n nIDs = [i for i,v in enumerate(nPts) if v == True]\n \n for i in range(mesh.Faces.Count):\n \n # Get face vertices\n f = mesh.Faces.Item[i]\n if f.IsTriangle:\n vts = (f.A,f.B,f.C)\n else:\n vts = (f.A,f.B,f.C,f.D)\n \n # Check if they are naked\n naked = False\n for vt in vts:\n if vt in nIDs:\n naked = True\n \n if naked:\n nakedFaces.append(i)\n \n return nakedFaces", "def test_has_alpha(self):\n image_3d = np.array([[ # One image with shape (1, 2, 3)\n [1, 2, 3],\n [4, 5, 6]\n ]])\n image_4d = np.array([[ # One image with shape (1, 3, 4)\n [1, 2, 3, 4],\n [4, 5, 6, 7],\n [8, 9, 10, 11]\n ]])\n image_5d = np.array([[ # One image with shape (1, 1, 5)\n [1, 2, 3, 4, 5]\n ]])\n self.assertEqual(localHDR.has_alpha(image_3d), False)\n self.assertEqual(localHDR.has_alpha(image_4d), True)\n self.assertEqual(localHDR.has_alpha(image_5d), False)", "def test_faces(self):\n\n self.test_shape.workplane = \"XY\"\n self.test_shape.rotation_axis = \"Z\"\n\n assert self.test_shape.area == pytest.approx((((math.pi * (10**2)) * 2) + (math.pi * (10 * 2) * 100)) * 8)\n assert len(self.test_shape.areas) == 24\n assert self.test_shape.areas.count(pytest.approx(math.pi * (10**2))) == 16\n assert self.test_shape.areas.count(pytest.approx(math.pi * (10 * 2) * 100)) == 8", "def _analyze(self):\n frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)\n faces = self._face_detector(frame)\n try:\n landmarks = self._predictor(frame, faces[0])\n self.eye_left = Eye(frame, landmarks, 0, self.calibration)\n self.eye_right = Eye(frame, landmarks, 1, self.calibration)\n except IndexError:\n self.eye_left = None\n self.eye_right = None", "def image_present_check(self):\r\n if not self.master.images: # If no images present in the list\r\n messagebox.showerror(\"Error\", 'No image selected') # Throw up the error messagebox\r\n\r\n else:\r\n return True # If there are images present in the list, then return True value\r", "def image(self):\n return self.any_image(-1)", "def get_faces(self, image):\n\t\t\n\t\t# Convert the image to grayscale and normalise\n\t\tcv.CvtColor(image, self.gray, cv.CV_BGR2GRAY)\n\t\tcv.EqualizeHist(self.gray, self.gray)\n\t\t\n\t\t# Detect faces\n\t\treturn 
cv.HaarDetectObjects(self.gray, self.cascade, self.storage,\n\t\t scale_factor = 1.3,\n\t\t min_neighbors = 2,\n\t\t flags = cv.CV_HAAR_DO_CANNY_PRUNING,\n\t\t min_size = (40,40))", "def paint_faces_data(frame, faces_data):\n for face in faces_data:\n (top, right, bottom, left) = face['location']\n\n if face['identity'] is None:\n name = 'Unknown'\n color = (0, 0, 255) # red\n else:\n name = face['identity']\n color = (0, 128, 0) # dark green\n\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), color, 2)\n\n # Draw a label with a name below the face\n cv2.rectangle(frame, (left, bottom - 35), (right, bottom), color, cv2.FILLED)\n cv2.putText(frame, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1)", "def _detect(self):\n return True", "def not_in_image(self) -> bool:\n return not self.vector", "def ssd_face_detection(image):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image_np_expanded = np.expand_dims(image, axis=0)\n\n (boxes, scores, classes) = FaceDetectorModel().sess.run(\n [FaceDetectorModel().boxes, FaceDetectorModel().scores, FaceDetectorModel().classes],feed_dict={FaceDetectorModel().image_tensor: image_np_expanded})\n\n face_list = vis_util.visualize_boxes_and_labels_on_image_array(\n image,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=4)\n\n return face_list", "def check(self, grain=50):\n opengles.glReadPixels(0, 0, self.ix, self.iy,\n GL_RGB, GL_UNSIGNED_BYTE,\n ctypes.byref(self.img))\n r0 = self.img[0:3]\n step = 3 * int(self.ix * self.iy / 50)\n for i in xrange(0, len(self.img)-3, step):\n if self.img[i:(i+3)] != r0:\n return True\n\n return False", "def detect_faces_from_image(self, img, desired_width,\n desired_height, bbox_scaling=1.1):\n \n n_face = 0\n faces_data = []\n\n frame_height = img.shape[0]\n frame_width = img.shape[1]\n blob = cv2.dnn.blobFromImage(img, 1.0, (300, 300), [104, 117, 123], True, False)\n self.face_detector.setInput(blob)\n detections = self.face_detector.forward()\n \n \n for i in range(detections.shape[2]):\n confidence = detections[0, 0, i, 2]\n if confidence > self.threshold:\n n_face += 1\n bbox = _get_bbox_pts(detections, i, frame_width, frame_height)\n x1, y1 = [int(i * abs(bbox_scaling//1 - bbox_scaling%1)) for i in bbox[:2]]\n x2, y2 = [int(i*bbox_scaling) for i in bbox[2:]]\n if x1 < x2 and y1 < y2:\n dets = [dlib.rectangle(x1, y1, x2, y2)]\n else:\n dets = [dlib.rectangle(0, 0, frame_width, frame_height)]\n\n \n face_img, left_eye, right_eye = self.align_and_crop_face(img, dets, desired_width,\n desired_height)\n \n face_data = [dets, face_img, left_eye, right_eye,\n 'face_%d' % n_face, confidence]\n faces_data.append(face_data)\n\n return faces_data" ]
[ "0.7674138", "0.72298473", "0.7020681", "0.6924796", "0.6771849", "0.67457414", "0.67417926", "0.66322297", "0.6547639", "0.6388487", "0.6315418", "0.6311557", "0.62890303", "0.6275695", "0.6254575", "0.62504184", "0.62351817", "0.6222348", "0.6187851", "0.6173235", "0.6154409", "0.61344326", "0.6125496", "0.61066365", "0.61066365", "0.60854256", "0.6082778", "0.6032442", "0.602328", "0.60100776", "0.5992032", "0.5988415", "0.59849477", "0.59808743", "0.5965699", "0.59477437", "0.5932849", "0.5931786", "0.5928317", "0.5923464", "0.5902754", "0.5898241", "0.5891863", "0.58848137", "0.58762217", "0.58461374", "0.580361", "0.57874155", "0.5767124", "0.576491", "0.57528526", "0.5752139", "0.5747983", "0.5735648", "0.5733463", "0.572748", "0.57261777", "0.56890994", "0.56824034", "0.5680578", "0.5679962", "0.5677781", "0.56764793", "0.5673597", "0.5669054", "0.56682044", "0.56514174", "0.5636737", "0.5629734", "0.56232864", "0.56187725", "0.56122756", "0.5609815", "0.5605587", "0.5596605", "0.55927", "0.5591757", "0.558564", "0.5583919", "0.557791", "0.5576684", "0.5573211", "0.5561383", "0.55564857", "0.55564857", "0.5538018", "0.5536127", "0.5535398", "0.55326164", "0.55284184", "0.55212104", "0.55147994", "0.5514538", "0.550971", "0.5496613", "0.54958713", "0.5494839", "0.5494614", "0.5485176", "0.54828113" ]
0.6130096
22
A helper function to generate quaternions from yaws.
def heading(yaw): q = euler2quat(0.0, 0.0, yaw) quat = Quaternion() quat.w = q[0] quat.x = q[1] quat.y = q[2] quat.z = q[3] return quat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def euler_from_quaternion(x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n \n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n \n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n \n return roll_x, pitch_y, yaw_z # in radians", "def euler_from_quaternion(x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n \n t2 = +2.0 * (w * y - z * x)\n pitch_y = math.asin(t2)\n \n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n \n return roll_x, pitch_y, yaw_z # in radians", "def euler_from_quaternion(x, y, z, w):\r\n\tt0 = +2.0 * (w * x + y * z)\r\n\tt1 = +1.0 - 2.0 * (x * x + y * y)\r\n\troll_x = math.atan2(t0, t1)\r\n\r\n\tt2 = +2.0 * (w * y - z * x)\r\n\tt2 = +1.0 if t2 > +1.0 else t2\r\n\tt2 = -1.0 if t2 < -1.0 else t2\r\n\tpitch_y = math.asin(t2)\r\n\r\n\tt3 = +2.0 * (w * z + x * y)\r\n\tt4 = +1.0 - 2.0 * (y * y + z * z)\r\n\tyaw_z = math.atan2(t3, t4)\r\n\r\n\treturn roll_x, pitch_y, yaw_z # in radians\r", "def euler_from_quaternion(x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n\n return roll_x, pitch_y, yaw_z # in radians", "def quat_to_yaw_deg(qx,qy,qz,qw):\n degree = pi/180\n sqy = qy*qy\n sqz = qz*qz\n siny = 2 * (qw*qz+qx*qy)\n cosy = 1 - 2*(qy*qy+qz*qz)\n yaw = int(atan2(siny,cosy)/degree)\n return yaw", "def euler_from_quaternion(self, x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n\n return roll_x, pitch_y, yaw_z # in radians", "def test_quaternion_invert():\n q = np.array([0.58183503, -0.75119889, -0.24622332, 0.19116072])\n q_inv = pr.q_conj(q)\n q_q_inv = pr.concatenate_quaternions(q, q_inv)\n assert_array_almost_equal(pr.q_id, q_q_inv)", "def quaternion_to_euler(q):\r\n W = q[0]\r\n X = q[1]\r\n Y = q[2]\r\n Z = q[3]\r\n\r\n # roll(x - axis rotation)\r\n sinr_cosp = +2.0 * (W * X + Y * Z)\r\n cosr_cosp = +1.0 - 2.0 * (X * X + Y * Y)\r\n roll = math.atan2(sinr_cosp, cosr_cosp)\r\n\r\n # pitch(y - axis rotation)\r\n sinp = +2.0 * (W * Y - Z * X)\r\n if abs(sinp) >= 1:\r\n pitch = np.copysign(math.pi / 2, sinp) # use 90 degrees if out of range\r\n else:\r\n pitch = math.asin(sinp)\r\n\r\n # yaw(z - axis rotation)\r\n siny_cosp = +2.0 * (W * Z + X * Y)\r\n cosy_cosp = +1.0 - 2.0 * (Y * Y + Z * Z)\r\n yaw = math.atan2(siny_cosp, cosy_cosp)\r\n\r\n return roll, pitch, yaw", "def to_q(self, method: str = 'chiaverini', **kw) -> np.ndarray:\n return self.to_quaternion(method=method, **kw)", "def random_quaternion():\n\n import numpy as np\n \n while True: # Loop until within unit disk\n zeta = 2.0*np.random.rand(2) - 1.0 # Two uniform random numbers between -1 and 1\n norm1 = np.sum ( zeta**2 ) # Squared magnitude\n if norm1 < 1.0: # Test for within unit disk\n break\n\n while True: # Loop until within unit disk\n beta = 2.0*np.random.rand(2) - 1.0 # 
Two uniform random numbers between -1 and 1\n norm2 = np.sum ( beta**2 ) # Squared magnitude\n if norm2 < 1.0: # Test for within unit disk\n break\n\n f = np.sqrt ( ( 1.0 - norm1 ) / norm2 )\n return np.array ( ( zeta[0], zeta[1], beta[0]*f, beta[1]*f ), dtype=np.float_ ) # Random quaternion", "def euler_to_quat(self, yaw):\n quat_array = t.quaternion_from_euler(0.0, 0.0, yaw)\n return Quaternion(quat_array[0], quat_array[1], quat_array[2], quat_array[3])", "def euler_angles(quatX,quatY,quatZ,quatW):\n\n\troll1 = 2.0 * (quatW * quatX + quatY * quatZ)\n\troll2 = (1.0 - 2.0) * (quatX * quatX + quatY * quatY)\n\n\tyaw1 = 2.0 * (quatW * quatZ + quatX * quatY)\n\tyaw2 = 1.0 - 2.0 * (quatY * quatY + quatZ * quatZ)\n\n\troll = math.atan2(roll1,roll2)\n\tpitch = math.asin(max(-1.0, min(1.0, 2.0 *(quatW * quatY - quatZ * quatX))))\n\tyaw = math.atan2(yaw1,yaw2)\n\n\troll_w = int(((roll + (math.pi)) / (math.pi * 2.0) * 18))\n\tpitch_w = int(pitch + (math.pi/2.0)/math.pi * 18)\n\tyaw_w = int(yaw + (math.pi / (math.pi * 2.0)) * 18)\n\n\teulerAngles = [roll_w,pitch_w,yaw_w]\n\treturn eulerAngles", "def test_quaternion_hamilton():\n q_ij = pr.concatenate_quaternions(pr.q_i, pr.q_j)\n assert_array_equal(pr.q_k, q_ij)\n q_ijk = pr.concatenate_quaternions(q_ij, pr.q_k)\n assert_array_equal(-pr.q_id, q_ijk)", "def test_quaternion_conventions():\n q_wxyz = np.array([1.0, 0.0, 0.0, 0.0])\n q_xyzw = pr.quaternion_xyzw_from_wxyz(q_wxyz)\n assert_array_equal(q_xyzw, np.array([0.0, 0.0, 0.0, 1.0]))\n q_wxyz2 = pr.quaternion_wxyz_from_xyzw(q_xyzw)\n assert_array_equal(q_wxyz, q_wxyz2)\n\n random_state = np.random.RandomState(42)\n q_wxyz_random = pr.random_quaternion(random_state)\n q_xyzw_random = pr.quaternion_xyzw_from_wxyz(q_wxyz_random)\n assert_array_equal(q_xyzw_random[:3], q_wxyz_random[1:])\n assert_equal(q_xyzw_random[3], q_wxyz_random[0])\n q_wxyz_random2 = pr.quaternion_wxyz_from_xyzw(q_xyzw_random)\n assert_array_equal(q_wxyz_random, q_wxyz_random2)", "def quat2transform(q):\n x, y, z, w = q\n xx2 = 2 * x * x\n yy2 = 2 * y * y\n zz2 = 2 * z * z\n xy2 = 2 * x * y\n wz2 = 2 * w * z\n zx2 = 2 * z * x\n wy2 = 2 * w * y\n yz2 = 2 * y * z\n wx2 = 2 * w * x\n\n rmat = np.empty((3, 3), float)\n rmat[0,0] = 1. - yy2 - zz2\n rmat[0,1] = xy2 - wz2\n rmat[0,2] = zx2 + wy2\n rmat[1,0] = xy2 + wz2\n rmat[1,1] = 1. - xx2 - zz2\n rmat[1,2] = yz2 - wx2\n rmat[2,0] = zx2 - wy2\n rmat[2,1] = yz2 + wx2\n rmat[2,2] = 1. 
- xx2 - yy2\n\n return rmat", "def qrst_tm_ao(y):\n return (y - (-0.6685))/0.2228", "def to_quaternion(self, method: str = 'chiaverini', **kw) -> np.ndarray:\n q = np.array([1., 0., 0., 0.])\n if method.lower()=='hughes':\n q = hughes(self.A)\n if method.lower()=='chiaverini':\n q = chiaverini(self.A)\n if method.lower()=='shepperd':\n q = shepperd(self.A)\n if method.lower()=='itzhack':\n q = itzhack(self.A, version=kw.get('version', 3))\n if method.lower()=='sarabandi':\n q = sarabandi(self.A, eta=kw.get('threshold', 0.0))\n return q/np.linalg.norm(q)", "async def attitude_quaternion(self):\n\n request = telemetry_pb2.SubscribeAttitudeQuaternionRequest()\n attitude_quaternion_stream = self._stub.SubscribeAttitudeQuaternion(request)\n\n try:\n async for response in attitude_quaternion_stream:\n \n\n \n yield Quaternion.translate_from_rpc(response.attitude_quaternion)\n finally:\n attitude_quaternion_stream.cancel()", "def get_rot_dtdt(self) -> WAQuaternion:\n pass", "def euler_from_quaternion(self, quaternion):\n x = quaternion.x\n y = quaternion.y\n z = quaternion.z\n w = quaternion.w\n\n sinr_cosp = 2 * (w * x + y * z)\n cosr_cosp = 1 - 2 * (x * x + y * y)\n roll = np.arctan2(sinr_cosp, cosr_cosp)\n\n sinp = 2 * (w * y - z * x)\n pitch = np.arcsin(sinp)\n\n siny_cosp = 2 * (w * z + x * y)\n cosy_cosp = 1 - 2 * (y * y + z * z)\n yaw = np.arctan2(siny_cosp, cosy_cosp)\n\n return roll, pitch, yaw", "def random_quaternions(count=100):\n rands = np.random.rand(count,3)\n root_1 = np.sqrt(rands[:,0])\n minus_root_1 = np.sqrt(1-rands[:,0])\n two_pi_2 = np.pi*2*rands[:,1]\n two_pi_3 = np.pi*2*rands[:,2]\n \n res = np.zeros((count,4))\n res[:,0] = minus_root_1*np.sin(two_pi_2)\n res[:,1] = minus_root_1*np.cos(two_pi_2)\n res[:,2] = root_1*np.sin(two_pi_3)\n res[:,3] = root_1*np.cos(two_pi_3)\n \n return res", "def multiply_quaternions( qa, qb ):\n combined = Quaternion()\n\n combined.w = (qa.w * qb.w - qa.x * qb.x - qa.y * qb.y - qa.z * qb.z)\n combined.x = (qa.x * qb.w + qa.w * qb.x + qa.y * qb.z - qa.z * qb.y)\n combined.y = (qa.w * qb.y - qa.x * qb.z + qa.y * qb.w + qa.z * qb.x)\n combined.z = (qa.w * qb.z + qa.x * qb.y - qa.y * qb.x + qa.z * qb.w)\n return combined", "def test_check_quaternions():\n Q_list = [[1, 0, 0, 0]]\n Q = pr.check_quaternions(Q_list)\n assert_array_almost_equal(Q_list, Q)\n assert_equal(type(Q), np.ndarray)\n assert_equal(Q.dtype, np.float64)\n assert_equal(Q.ndim, 2)\n assert_array_equal(Q.shape, (1, 4))\n\n Q = np.array([\n [2, 0, 0, 0],\n [3, 0, 0, 0],\n [4, 0, 0, 0],\n [5, 0, 0, 0]\n ])\n Q = pr.check_quaternions(Q)\n for i in range(len(Q)):\n assert_almost_equal(np.linalg.norm(Q[i]), 1)\n\n assert_raises_regexp(ValueError, \"Expected quaternion array with shape\",\n pr.check_quaternions, np.zeros(4))\n assert_raises_regexp(ValueError, \"Expected quaternion array with shape\",\n pr.check_quaternions, np.zeros((3, 3)))\n\n Q = np.array([[0.0, 1.2, 0.0, 0.0]])\n Q2 = pr.check_quaternions(Q, unit=False)\n assert_array_almost_equal(Q, Q2)", "def euler_to_quaternion(euler: tuple) -> object:\n\n (yaw, pitch, roll) = (euler[0], euler[1], euler[2])\n qy = np.sin(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) - np.cos(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)\n qx = np.cos(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2)\n qw = np.cos(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) - np.sin(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2)\n qz = np.cos(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) + 
np.sin(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)\n return qx, qy, qz, qw", "def convert_euler_to_quaternion(roll, yaw, pitch):\n\n # roll (z), yaw (y), pitch (x)\n\n cy = math.cos(math.radians(roll) * 0.5)\n sy = math.sin(math.radians(roll) * 0.5)\n\n cp = math.cos(math.radians(yaw) * 0.5)\n sp = math.sin(math.radians(yaw) * 0.5)\n\n cr = math.cos(math.radians(pitch) * 0.5)\n sr = math.sin(math.radians(pitch) * 0.5)\n\n w = cy * cp * cr + sy * sp * sr\n x = cy * cp * sr - sy * sp * cr\n y = sy * cp * sr + cy * sp * cr\n z = sy * cp * cr - cy * sp * sr\n\n quat = np.array([w, x, y, z])\n quat = quat / np.linalg.norm(quat)\n return quat", "def _create_quaternion(direction, up) -> Tuple[float, float, float, float]:\n direction = direction / spy.vnorm(direction)\n up = up / spy.vnorm(up)\n\n x = spy.vcrss(up, direction)\n x = x / spy.vnorm(x)\n y = spy.vcrss(direction, x)\n y = y / spy.vnorm(y)\n z = direction\n\n r = sqrt(1.0 + x[0] + y[1] + z[2]) * 0.5\n i = (y[2] - z[1]) / (4 * r)\n j = (z[0] - x[2]) / (4 * r)\n k = (x[1] - y[0]) / (4 * r)\n\n return r, i, j, k", "def yaw_ned_to_enu(radian):\n q_ned = tf.transformations.quaternion_from_euler(0, 0, radian)\n q_enu = np.array([q_ned[1], q_ned[0], -q_ned[2], q_ned[3]])\n q_90 = tf.transformations.quaternion_from_euler(0, 0, np.pi / 2.)\n q_enu = tf.transformations.quaternion_multiply(quaternion0=q_enu,\n quaternion1=q_90)\n yaw_enu = tf.transformations.euler_from_quaternion(q_enu)[-1]\n return yaw_enu", "def rotate_q_exp(self, qx, qy, qz):\n # qx, qy, qz = self.rotate_q_exp(qx, qy, qz)\n \n q_vector = np.array( [[qx],[qy],[qz]] )\n \n q_rotated = np.dot( self.rotation_matrix_exp, q_vector )\n qx = q_rotated[0,0]\n qy = q_rotated[1,0]\n qz = q_rotated[2,0]\n \n return qx, qy, qz", "def get_euler_angle_from_quat(w, x, y, z):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n alpha = math.atan2(t0, t1) * 180 / math.pi\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n beta = math.asin(t2) * 180 / math.pi\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n gamma = math.atan2(t3, t4) * 180 / math.pi\n return alpha, beta, gamma", "def quaternion_to_angle(q):\n\tx, y, z, w = q.x, q.y, q.z, q.w\n\troll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n\treturn yaw", "def test_interpolate_same_quaternion():\n n_steps = 3\n random_state = np.random.RandomState(42)\n a = pr.random_axis_angle(random_state)\n q = pr.quaternion_from_axis_angle(a)\n traj = [pr.quaternion_slerp(q, q, t) for t in np.linspace(0, 1, n_steps)]\n assert_equal(len(traj), n_steps)\n assert_array_almost_equal(traj[0], q)\n assert_array_almost_equal(traj[1], q)\n assert_array_almost_equal(traj[2], q)", "def quat2expmap(q):\n if np.abs(np.linalg.norm(q) - 1) > 1e-3:\n # raise ValueError\n print(\"corrupteeeeed....\")\n\n sinhalftheta = np.linalg.norm(q[1:])\n coshalftheta = q[0]\n\n r0 = np.divide(q[1:], (np.linalg.norm(q[1:]) + np.finfo(np.float32).eps))\n theta = 2 * np.arctan2(sinhalftheta, coshalftheta)\n theta = np.mod(theta + 2 * np.pi, 2 * np.pi)\n\n if theta > np.pi:\n theta = 2 * np.pi - theta\n r0 = -r0\n\n r = r0 * theta\n return r", "def rpy_from_quaternion(quaternion):\n (yaw, pitch, roll) = quaternion.yaw_pitch_roll\n return (roll, pitch, yaw)", "def qgset_ao(y):\n return (y - (- 0.8565))/0.2855", "def quat_to_euler(orientation):\n quaternion = (\n orientation.x,\n orientation.y,\n orientation.z,\n orientation.w\n )\n euler = 
tf.transformations.euler_from_quaternion(quaternion)\n roll = euler[0]\n pitch = euler[1]\n yaw = euler[2]\n return (roll,pitch,yaw)", "def quatRightMat(q):\n\ts = q[0]\n\tv = q[1:].reshape(-1,)\n\tR = np.zeros((4, 4))\n\tR[0, 0] = s\n\tR[0, 1:] = -v\n\tR[1:, 0] = v\n\tR[1:, 1:] = s*np.eye(3) - skewMat(v)\n\treturn R", "def _quaternions(self, R):\n # Simple Wikipedia version\n # en.wikipedia.org/wiki/Rotation_matrix#Quaternion\n # For other options see math.stackexchange.com/questions/2074316/calculating-rotation-axis-from-rotation-matrix\n diag = torch.diagonal(R, dim1=-2, dim2=-1)\n Rxx, Ryy, Rzz = diag.unbind(-1)\n magnitudes = 0.5 * torch.sqrt(torch.abs(1 + torch.stack([\n Rxx - Ryy - Rzz, \n - Rxx + Ryy - Rzz, \n - Rxx - Ryy + Rzz\n ], -1)))\n _R = lambda i,j: R[:,:,:,i,j]\n signs = torch.sign(torch.stack([\n _R(2,1) - _R(1,2),\n _R(0,2) - _R(2,0),\n _R(1,0) - _R(0,1)\n ], -1))\n xyz = signs * magnitudes\n # The relu enforces a non-negative trace\n w = torch.sqrt(F.relu(1 + diag.sum(-1, keepdim=True))) / 2.\n Q = torch.cat((xyz, w), -1)\n Q = F.normalize(Q, dim=-1)\n return Q", "def euler_to_quaternion(yaw, pitch, roll):\r\n cy = math.cos(yaw * 0.5)\r\n sy = math.sin(yaw * 0.5)\r\n cp = math.cos(pitch * 0.5)\r\n sp = math.sin(pitch * 0.5)\r\n cr = math.cos(roll * 0.5)\r\n sr = math.sin(roll * 0.5)\r\n w = cy * cp * cr + sy * sp * sr\r\n x = cy * cp * sr - sy * sp * cr\r\n y = sy * cp * sr + cy * sp * cr\r\n z = sy * cp * cr - cy * sp * sr\r\n return w, x, y, z", "def quaternion_to_angle(self, q):\n\tx, y, z, w = q.x, q.y, q.z, q.w\n\troll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n\treturn yaw", "def quaternion_to_angle(q):\n x, y, z, w = q.x, q.y, q.z, q.w\n roll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n return yaw", "def quarter_chord(self) -> np.ndarray:\n return 0.75 * self.xyz_le + 0.25 * self.xyz_te()", "def test_quaternion_diff():\n random_state = np.random.RandomState(0)\n\n for _ in range(5):\n q1 = pr.random_quaternion(random_state)\n q2 = pr.random_quaternion(random_state)\n a_diff = pr.quaternion_diff(q1, q2) # q1 - q2\n q_diff = pr.quaternion_from_axis_angle(a_diff)\n q3 = pr.concatenate_quaternions(q_diff, q2) # q1 - q2 + q2\n pr.assert_quaternion_equal(q1, q3)", "def average_quaternion(quaternions, weights=None):\n \n quaternions = np.array(quaternions)\n # short circuit if only one quat\n if quaternions.ndim == 1:\n assert quaternions.shape[0] == 4\n return quaternions\n \n assert quaternions.shape[1] == 4\n assert weights is None or quaternions.shape[0] == weights.shape[0]\n \n if weights is None:\n M = np.einsum('ij,ik->jk', quaternions, quaternions)\n else:\n M = np.einsum('ij,ik,i->jk', quaternions, quaternions, weights)\n \n # M is guaranteed to by symmetric by construction\n _, v = scipy.linalg.eigh(M)\n qnorm = np.linalg.norm(v[:,-1])\n if np.isclose(qnorm, 0.):\n # average is undefined, e.g. 
you tried to average q and p where <q,p> = 0\n return None\n qf = v[:,-1] / qnorm\n \n # Check the sign\n q_approx = np.sum(quaternions, axis=0)\n if np.isclose(np.linalg.norm(q_approx), 0.):\n # average direction is undefined, just return what we've got\n return qf\n elif np.dot(qf, q_approx) < 0:\n qf *= -1\n \n return qf", "def convert_quaternion_to_euler(quaternion_frames):\n\n def gen_4_tuples(it):\n \"\"\"Generator of n-tuples from iterable\"\"\"\n\n return list(zip(it[0::4], it[1::4], it[2::4], it[3::4]))\n\n def get_euler_frame(quaternionion_frame):\n \"\"\"Converts a quaternion frame into an Euler frame\"\"\"\n\n euler_frame = list(quaternionion_frame[:3])\n for quaternion in gen_4_tuples(quaternionion_frame[3:]):\n euler_frame += quaternion_to_euler(quaternion)\n\n return euler_frame\n\n euler_frames = list(map(get_euler_frame, quaternion_frames))\n\n return np.array(euler_frames)", "def q2xyz_func(q): \n if use_scale:\n q = scale_up(q)\n t = calc_T(q) \n return t[0], t[1], t[2]", "def _rotate_quaternion(self, q):\n self._normalise()\n return self * q * self.conjugate", "def quaternion_to_RPY(q: array):\n\n roll: float = arctan2(2 * (q[0] * q[1] + q[2] * q[3]), 1 - (2 * (power(q[1], 2) + power(q[2], 2))))\n pitch: float = arcsin(2 * (q[0] * q[2] - q[3] * q[1]))\n yaw: float = arctan2(2 * (q[0] * q[3] + q[1] * q[2]), 1 - (2 * (power(q[2], 2) + power(q[3], 2))))\n\n return roll, pitch, yaw", "def yaw2quat(yaw, base_quat=None):\n rot_euler = [yaw, 0, 0]\n abs_rot = R.from_euler('zyx', rot_euler)\n if base_quat is not None:\n base_rot = R.from_quat([base_quat])\n abs_rot = base_rot.reduce(left=abs_rot)\n return abs_rot.as_quat()", "def quaternion_inv(quaternion):\r\n q = numpy.array(quaternion, dtype=numpy.float64, copy=True)\r\n numpy.negative(q[1:], q[1:])\r\n return q / numpy.dot(q, q)", "def QuatNormalize(wxyz):\n return wxyz/rigmech.QuatMag(wxyz)", "def quaternion_product(q1, q2):\r\n Wa = q1[0]\r\n Wb = q2[0]\r\n Xa = q1[1]\r\n Xb = q2[1]\r\n Ya = q1[2]\r\n Yb = q2[2]\r\n Za = q1[3]\r\n Zb = q2[3]\r\n x = Xa * Wb + Ya * Zb - Za * Yb + Wa * Xb\r\n y = -Xa * Zb + Ya * Wb + Za * Xb + Wa * Yb\r\n z = Xa * Yb - Ya * Xb + Za * Wb + Wa * Zb\r\n w = -Xa * Xb - Ya * Yb - Za * Zb + Wa * Wb\r\n return [w, x, y, z]", "def multiply_quaternions(quats1,quats2):\n w1 = quats1[:,0]\n x1 = quats1[:,1]\n y1 = quats1[:,2]\n z1 = quats1[:,3]\n\n w2 = quats2[:,0]\n x2 = quats2[:,1]\n y2 = quats2[:,2]\n z2 = quats2[:,3]\n\n res = np.zeros((quats1.shape[0],4))\n \n res[:,0] = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2\n res[:,1] = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2\n res[:,2] = w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2\n res[:,3] = w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2 \n return res", "def from_quaternion(self, q: np.ndarray) -> np.ndarray:\n if q is None:\n return np.identity(3)\n if q.shape[-1]!=4 or q.ndim>2:\n raise ValueError(\"Quaternion must be of the form (4,) or (N, 4)\")\n if q.ndim>1:\n q /= np.linalg.norm(q, axis=1)[:, None] # Normalize\n R = np.zeros((q.shape[0], 3, 3))\n R[:, 0, 0] = 1.0 - 2.0*(q[:, 2]**2 + q[:, 3]**2)\n R[:, 1, 0] = 2.0*(q[:, 1]*q[:, 2]+q[:, 0]*q[:, 3])\n R[:, 2, 0] = 2.0*(q[:, 1]*q[:, 3]-q[:, 0]*q[:, 2])\n R[:, 0, 1] = 2.0*(q[:, 1]*q[:, 2]-q[:, 0]*q[:, 3])\n R[:, 1, 1] = 1.0 - 2.0*(q[:, 1]**2 + q[:, 3]**2)\n R[:, 2, 1] = 2.0*(q[:, 0]*q[:, 1]+q[:, 2]*q[:, 3])\n R[:, 0, 2] = 2.0*(q[:, 1]*q[:, 3]+q[:, 0]*q[:, 2])\n R[:, 1, 2] = 2.0*(q[:, 2]*q[:, 3]-q[:, 0]*q[:, 1])\n R[:, 2, 2] = 1.0 - 2.0*(q[:, 1]**2 + q[:, 2]**2)\n return R\n q /= np.linalg.norm(q)\n return np.array([\n 
[1.0-2.0*(q[2]**2+q[3]**2), 2.0*(q[1]*q[2]-q[0]*q[3]), 2.0*(q[1]*q[3]+q[0]*q[2])],\n [2.0*(q[1]*q[2]+q[0]*q[3]), 1.0-2.0*(q[1]**2+q[3]**2), 2.0*(q[2]*q[3]-q[0]*q[1])],\n [2.0*(q[1]*q[3]-q[0]*q[2]), 2.0*(q[0]*q[1]+q[2]*q[3]), 1.0-2.0*(q[1]**2+q[2]**2)]])", "def check_quat(test_quat, ref_quat):\n test_quat = np.asarray(test_quat)\n ref_quat = np.asarray(ref_quat)\n dot = np.dot(test_quat, ref_quat)\n if dot < 0:\n test_quat = - test_quat\n return test_quat.tolist()", "def dcm_to_quaternions(dcm):\n trace = np.trace(dcm)\n b_2 = (1/4)*np.array([(1+trace), (1 + 2*dcm[0, 0] - trace), (1 + 2*dcm[1, 1] - trace), (1 + 2*dcm[2, 2] - trace)])\n argmax = np.argmax(b_2)\n b = np.zeros(4)\n b[argmax] = np.sqrt(b_2[argmax])\n\n # There is probably a cleaner way to do these checks\n if argmax == 0:\n b[1] = (dcm[1, 2] - dcm[2, 1])/(4*b[0])\n b[2] = (dcm[2, 0] - dcm[0, 2])/(4*b[0])\n b[3] = (dcm[0, 1] - dcm[1, 0])/(4*b[0])\n elif argmax == 1:\n b[0] = (dcm[1, 2] - dcm[2, 1])/(4*b[1])\n b[2] = (dcm[0, 1] + dcm[1, 0])/(4*b[1])\n b[3] = (dcm[2, 0] + dcm[0, 2])/(4*b[1])\n elif argmax == 2:\n b[0] = (dcm[2, 0] - dcm[0, 2])/(4*b[2])\n b[1] = (dcm[0, 1] + dcm[1, 0])/(4*b[2])\n b[3] = (dcm[1, 2] + dcm[2, 1])/(4*b[2])\n elif argmax == 3:\n b[0] = (dcm[0, 1] - dcm[1, 0])/(4*b[3])\n b[1] = (dcm[2, 0] + dcm[0, 2])/(4*b[3])\n b[2] = (dcm[1, 2] + dcm[2, 1])/(4*b[3])\n\n # last step to make sure we have the 'short rotation'\n if b[0] < 0:\n b = -b\n\n return b", "def _quatm(q1, q0):\n w0, x0, y0, z0 = q0\n w1, x1, y1, z1 = q1\n\n return torch.cuda.FloatTensor([\n -x1*x0 - y1*y0 - z1*z0 + w1*w0,\n x1*w0 + y1*z0 - z1*y0 + w1*x0,\n -x1*z0 + y1*w0 + z1*x0 + w1*y0,\n x1*y0 - y1*x0 + z1*w0 + w1*z0,\n ])", "def quatPassiveRot(q, v):\n\tv_q = np.zeros((4, 1))\n\tv_q[1:] = v\n\tv_qnew = quatLeftMat(q).T @ quatRightMat(q) @ v_q\n\treturn v_qnew[1:]", "def quaternion(self, name, q):\n R = self.R(name=name, q=q)\n quat = transformations.unit_vector(\n transformations.quaternion_from_matrix(matrix=R))\n return quat", "def rotate_ZNE_LQT(z, n, e, ba, inc):\n if len(z) != len(n) or len(z) != len(e):\n raise TypeError(\"Z, North and East component have different length!?!\")\n if ba < 0 or ba > 360:\n raise ValueError(\"Back Azimuth should be between 0 and 360 degrees!\")\n if inc < 0 or inc > 360:\n raise ValueError(\"Inclination should be between 0 and 360 degrees!\")\n ba *= 2 * pi / 360\n inc *= 2 * pi / 360\n l = z * cos(inc) - n * sin(inc) * cos(ba) - e * sin(inc) * sin(ba)\n q = z * sin(inc) + n * cos(inc) * cos(ba) + e * cos(inc) * sin(ba)\n t = n * sin(ba) - e * cos(ba)\n return l, q, t", "def invert_quaternion(quaternion):\n norm = np.linalg.norm(quaternion)\n quaternion[1:] = -1.0 * quaternion[1:]\n return quaternion / norm", "def tool_quat(self):\n return self.sim.data.get_body_xquat(self.end_effector)", "def quat2mat(self,quat):\n quat = np.asarray(quat, dtype=np.float64)\n assert quat.shape[-1] == 4, \"Invalid shape quat {}\".format(quat)\n\n w, x, y, z = quat[..., 0], quat[..., 1], quat[..., 2], quat[..., 3]\n Nq = np.sum(quat * quat, axis=-1)\n s = 2.0 / Nq\n X, Y, Z = x * s, y * s, z * s\n wX, wY, wZ = w * X, w * Y, w * Z\n xX, xY, xZ = x * X, x * Y, x * Z\n yY, yZ, zZ = y * Y, y * Z, z * Z\n\n mat = np.empty(quat.shape[:-1] + (3, 3), dtype=np.float64)\n mat[..., 0, 0] = 1.0 - (yY + zZ)\n mat[..., 0, 1] = xY - wZ\n mat[..., 0, 2] = xZ + wY\n mat[..., 1, 0] = xY + wZ\n mat[..., 1, 1] = 1.0 - (xX + zZ)\n mat[..., 1, 2] = yZ - wX\n mat[..., 2, 0] = xZ - wY\n mat[..., 2, 1] = yZ + wX\n mat[..., 2, 2] = 1.0 - (xX + yY)\n 
return np.where((Nq > _FLOAT_EPS)[..., np.newaxis, np.newaxis], mat, np.eye(3))", "def quatreal(q):\n a = q[0,0]\n b = q[0,1]\n c = q[0,2]\n d = q[0,3]\n amat = a*np.identity(4)\n bmat = b*np.array([[0,1,0,0],[-1,0,0,0],[0,0,0,-1],[0,0,1,0]])\n cmat = c*np.array([[0,0,1,0],[0,0,0,1],[-1,0,0,0],[0,-1,0,0]])\n dmat = d*np.array([[0,0,0,1],[0,0,-1,0],[0,1,0,0],[-1,0,0,0]])\n return amat+bmat+cmat+dmat", "def dcm_to_quaternions_bad(dcm):\n b0 = (1/2)*np.sqrt(np.trace(dcm) + 1)\n return np.array([b0, (dcm[1, 2] - dcm[2, 1])/(4*b0), (dcm[2, 0] - dcm[0, 2])/(4*b0),\n (dcm[0, 1] - dcm[1, 0]) / (4*b0)])", "def _pot_quat(self):\n return T.convert_quat(self.sim.data.body_xquat[self.cube_body_id], to=\"xyzw\")", "def test_conversions_axis_angle_quaternion():\n q = np.array([1, 0, 0, 0])\n a = pr.axis_angle_from_quaternion(q)\n assert_array_almost_equal(a, np.array([1, 0, 0, 0]))\n q2 = pr.quaternion_from_axis_angle(a)\n assert_array_almost_equal(q2, q)\n\n random_state = np.random.RandomState(0)\n for _ in range(5):\n a = pr.random_axis_angle(random_state)\n q = pr.quaternion_from_axis_angle(a)\n\n a2 = pr.axis_angle_from_quaternion(q)\n assert_array_almost_equal(a, a2)\n\n q2 = pr.quaternion_from_axis_angle(a2)\n pr.assert_quaternion_equal(q, q2)", "def convert_quaternion_frames_to_euler_frames(quaternion_frames):\n\n def gen_4_tuples(it):\n \"\"\"Generator of n-tuples from iterable\"\"\"\n\n return list(zip(it[0::4], it[1::4], it[2::4], it[3::4]))\n\n def get_euler_frame(quaternionion_frame):\n \"\"\"Converts a quaternion frame into an Euler frame\"\"\"\n\n euler_frame = list(quaternionion_frame[:3])\n for quaternion in gen_4_tuples(quaternionion_frame[3:]):\n euler_frame += quaternion_to_euler(quaternion)\n\n return euler_frame\n\n euler_frames = list(map(get_euler_frame, quaternion_frames))\n\n return np.array(euler_frames)", "def quatActiveRot(q, v):\n\tv_q = np.zeros((4, 1))\n\tv_q[1:] = v\n\tv_qnew = quatLeftMat(q) @ quatRightMat(q).T @ v_q\n\treturn v_qnew[1:]", "def music(csi_corr, csi_target, Ntx, Nrx, d_tx, d_rx, t):\n\n In = 0\n s = phase_correction(csi_corr, csi_target)\n s_lin = (s[:, :, 0, t:t + 2].reshape(6, 2, order='F'))\n\n '''Compute the covariance matrix and the eigendecompositon'''\n R_hat = np.cov(s_lin)\n D, Q = ln.eig(R_hat)\n\n '''Sort the eigenvalues in D'''\n Do = np.abs(D)\n D = np.sort(Do)[::-1]\n I = np.argsort(Do)[::-1]\n Q = Q[:, I]\n\n ''' Compute the Number of signal that are significative'''\n T = np.cumsum(np.real(D))\n for i in range(1, 1, np.size(T)):\n if T(i) >= 0.99 * T(np.size(T)):\n In = i\n break\n\n ''' Get the signal eigenvectors'''\n In = 0 # take the first signal\n Qs = Q[:, :In]\n\n ''' Get the noise eigenvectors'''\n Qn = Q[:, In + 1:]\n\n ''' Angles at which MUSIC Pseudospectrum will be computed '''\n angles1 = np.arange(-90, 90, 1)\n angles2 = np.arange(-90, 90, 1)\n\n '''Compute steering vectors corresponding values in angles'''\n a1 = np.exp(-1.j * 2 * np.pi * d_rx * np.tensordot(arange(Nrx), sin(angles1 * np.pi / 180), 0))\n a2 = np.exp(-1.j * 2 * np.pi * d_tx * np.tensordot(arange(Ntx), sin(angles1 * np.pi / 180), 0))\n\n '''Compute MUSIC \"spectrum\" '''\n music_spectrum = np.zeros((np.size(angles1), np.size(angles2)), dtype=complex)\n for k in range(1, np.size(angles2)):\n for j in range(1, np.size(angles1)):\n K = np.kron(a1[:, j], a2[:, k])\n s = dot(K.T, Qn)\n music_spectrum[j, k] = 1 / dot(abs(s), abs(s).T)\n\n ''' compute the mesh and plot the surf of the pseudospectrum '''\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n x = 
angles2\n y = angles1\n X, Y = np.meshgrid(x, y)\n Z = np.abs(np.squeeze(music_spectrum))\n ax = fig.add_subplot(111, projection='3d')\n ax.set_ylabel('AoA')\n ax.set_xlabel('AoD')\n ax.set_xlim3d(-90, 90)\n ax.set_ylim3d(-90, 90)\n ax.plot_surface(X, Y, Z, rstride=2, cstride=2, cmap=cm.jet, alpha=0.7, linewidth=0.25)\n\n ''' detect the peaks corresponding to DoD and DoA '''\n detect = detect_peaks(Z)\n index_max = np.column_stack(np.where(detect))\n x_ind = index_max[:, 0]\n y_ind = index_max[:, 1]\n tab = (np.transpose(np.array((Z[x_ind, y_ind], x[x_ind], y[y_ind])))).tolist()\n tab.sort(key=lambda e: e[0], reverse=True)\n myarray = np.asarray(tab[0])\n angles = myarray[1:]\n plt.show()\n\n return angles", "def tf_enu_to_ned(orientation_q):\n return _transform_orientation(\"global_frames\", orientation_q)", "def quatError(Qdes, Q):\n\n we = Qdes[0]*Q[0] + np.dot(Qdes[1:4].transpose(),Q[1:4]) - 1\n e = -Qdes[0]*Q[1:4] + Q[0]*Qdes[1:4] - np.cross(np.transpose(Qdes[1:4]), np.transpose(Q[1:4]))\n Qe = np.array([ we, e[0], e[1], e[2] ])\n\n return Qe", "def inverse(self):\n q_vector = np.zeros(4)\n q_vector[:3] = self.imaginary*-1\n q_vector[3] = self.w\n return Quat(q_vector,\"xyzw\")", "def get_wave(q):\n\n approximant = 'SEOBNRv4'\n chi1 = [0,0,0]\n chi2 = [0,0,0]\n deltaTOverM = 0.1\n omega0 = 2e-2\n\n t, h = LALPy.generate_LAL_waveform(approximant, q, chi1, chi2, deltaTOverM, omega0)\n\n Amp = np.abs(h)\n peakIdx = np.argmax(Amp)\n\n t -= t[peakIdx]\n\n tmin = -500\n if min(t) > tmin:\n raise Exception('Data not long enough, decrease omega0.')\n keepIdx = t - tmin > -1e-3 # simple hack to ensure t_vec is always nearly the same\n t = t[keepIdx]\n h = h[keepIdx]\n\n tmax = 100\n keepIdx = t - tmax < 1e-3\n t = t[keepIdx]\n h = h[keepIdx]\n\n return t, h", "def invkin(x, y, z):\n d1 = 159 # hoejden fra bordplade til 2. led.\n a1 = 0 # forskydningen langs y-aksen mellem 1. og 2. led.\n a2 = 160 # afstanden mellem 2. og 3. led.\n d4 = 230 # afstanden fra 3. led og ud til griberens gribepunkts inkl. 4. 
led.\n\n q1 = math.atan2(y, x)\n r2 = (x - a1 * math.cos(q1)) ** 2 + (y - a1 * math.sin(q1)) ** 2\n s = (z - d1)\n D = (r2 + s ** 2 - a2 ** 2 - d4 ** 2) / (2 * a2 * d4)\n q3 = math.atan2(-math.sqrt(1 - D ** 2), D)\n q2 = math.atan2(s, math.sqrt(r2)) - math.atan2(d4 * math.sin(q3), a2 + d4 * math.cos(q3)) - math.pi / 2\n q4 = 0 # Der tages ikke hoejde for griberens rotation i denne opgave.\n\n return q1, q2, q3, q4", "def to_quaternion(self, roll=0.0, pitch=0.0, yaw=0.0):\n t0 = math.cos(math.radians(yaw * 0.5))\n t1 = math.sin(math.radians(yaw * 0.5))\n t2 = math.cos(math.radians(roll * 0.5))\n t3 = math.sin(math.radians(roll * 0.5))\n t4 = math.cos(math.radians(pitch * 0.5))\n t5 = math.sin(math.radians(pitch * 0.5))\n\n w = t0 * t2 * t4 + t1 * t3 * t5\n x = t0 * t3 * t4 - t1 * t2 * t5\n y = t0 * t2 * t5 + t1 * t3 * t4\n z = t1 * t2 * t4 - t0 * t3 * t5\n\n return [w, x, y, z]", "def quaternion_covariance(quaternions, weights=None, avg=None):\n if avg is None:\n avg = average_quaternion(quaternions, weights)\n avg_inv = transformations.quaternion_inverse(avg)\n \n # Compute the Euler angle representation of each quaternion offset\n euler = []\n for q in quaternions:\n q_diff = transformations.quaternion_multiply(q, avg_inv)\n euler.append(transformations.euler_from_quaternion(q_diff))\n return np.std(euler, axis=0)", "def to_DCM(self) -> np.ndarray:\n if not all(self.is_versor()):\n raise AttributeError(\"All quaternions must be versors to be represented as Direction Cosine Matrices.\")\n R = np.zeros((self.num_qts, 3, 3))\n R[:, 0, 0] = 1.0 - 2.0*(self.y**2 + self.z**2)\n R[:, 1, 0] = 2.0*(self.x*self.y+self.w*self.z)\n R[:, 2, 0] = 2.0*(self.x*self.z-self.w*self.y)\n R[:, 0, 1] = 2.0*(self.x*self.y-self.w*self.z)\n R[:, 1, 1] = 1.0 - 2.0*(self.x**2 + self.z**2)\n R[:, 2, 1] = 2.0*(self.w*self.x+self.y*self.z)\n R[:, 0, 2] = 2.0*(self.x*self.z+self.w*self.y)\n R[:, 1, 2] = 2.0*(self.y*self.z-self.w*self.x)\n R[:, 2, 2] = 1.0 - 2.0*(self.x**2 + self.y**2)\n return R", "def get_best_quaternion(coordlist1, coordlist2):\n M = np.matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n\n if len(coordlist1) <= len(coordlist2):\n number = len(coordlist1)\n else:\n number = len(coordlist2)\n for i in xrange(number):\n aaa = np.matrix(np.outer(coordlist1[i], coordlist2[i]))\n M = M + aaa\n\n N11 = float(M[0][:, 0] + M[1][:, 1] + M[2][:, 2])\n N22 = float(M[0][:, 0] - M[1][:, 1] - M[2][:, 2])\n N33 = float(-M[0][:, 0] + M[1][:, 1] - M[2][:, 2])\n N44 = float(-M[0][:, 0] - M[1][:, 1] + M[2][:, 2])\n N12 = float(M[1][:, 2] - M[2][:, 1])\n N13 = float(M[2][:, 0] - M[0][:, 2])\n N14 = float(M[0][:, 1] - M[1][:, 0])\n N21 = float(N12)\n N23 = float(M[0][:, 1] + M[1][:, 0])\n N24 = float(M[2][:, 0] + M[0][:, 2])\n N31 = float(N13)\n N32 = float(N23)\n N34 = float(M[1][:, 2] + M[2][:, 1])\n N41 = float(N14)\n N42 = float(N24)\n N43 = float(N34)\n\n N = np.matrix([[N11, N12, N13, N14],\n [N21, N22, N23, N24],\n [N31, N32, N33, N34],\n [N41, N42, N43, N44]])\n\n values, vectors = np.linalg.eig(N)\n w = list(values)\n quat = vectors[:, w.index(max(w))]\n quat = np.array(quat).reshape(-1, ).tolist()\n return quat, max(w)", "def euler_from_quaternion(quaternion, axes='sxyz'):\r\n return euler_from_matrix(quaternion_matrix(quaternion), axes)", "def rotate_LQT_ZNE(l, q, t, ba, inc):\n if len(l) != len(q) or len(l) != len(t):\n raise TypeError(\"L, Q and T component have different length!?!\")\n if ba < 0 or ba > 360:\n raise ValueError(\"Back Azimuth should be between 0 and 360 degrees!\")\n if inc < 0 or inc > 360:\n raise 
ValueError(\"Inclination should be between 0 and 360 degrees!\")\n ba *= 2 * pi / 360\n inc *= 2 * pi / 360\n z = l * cos(inc) + q * sin(inc)\n n = -l * sin(inc) * cos(ba) + q * cos(inc) * cos(ba) + t * sin(ba)\n e = -l * sin(inc) * sin(ba) + q * cos(inc) * sin(ba) - t * cos(ba)\n return z, n, e", "def GetQuaternion(self, atTime):\n return _gmat_py.SpiceAttitude_GetQuaternion(self, atTime)", "def base_orientation_quaternion(self):\n raise NotImplementedError('Not yet implemented!')", "def Equ_wave (previous_U):\n return lambda U: (U-previous_U)/DELTA_t+G((U + previous_U)/2)", "def qrst_tm(x):\n return 0.2228*x - 0.6685", "def test_points_on_1sphere_4y():\n points = generate.points_on_1sphere(4, 'y')\n assert np.allclose(points[0], cst.quat1)\n assert np.allclose(points[1], cst.quaty90)\n assert np.allclose(points[2], cst.quaty)", "def quat2mat(self,quat):\n\t quat = np.asarray(quat, dtype=np.float64)\n\t assert quat.shape[-1] == 4, \"Invalid shape quat {}\".format(quat)\n\n\t w, x, y, z = quat[..., 0], quat[..., 1], quat[..., 2], quat[..., 3]\n\t Nq = np.sum(quat * quat, axis=-1)\n\t s = 2.0 / Nq\n\t X, Y, Z = x * s, y * s, z * s\n\t wX, wY, wZ = w * X, w * Y, w * Z\n\t xX, xY, xZ = x * X, x * Y, x * Z\n\t yY, yZ, zZ = y * Y, y * Z, z * Z\n\n\t mat = np.empty(quat.shape[:-1] + (3, 3), dtype=np.float64)\n\t mat[..., 0, 0] = 1.0 - (yY + zZ)\n\t mat[..., 0, 1] = xY - wZ\n\t mat[..., 0, 2] = xZ + wY\n\t mat[..., 1, 0] = xY + wZ\n\t mat[..., 1, 1] = 1.0 - (xX + zZ)\n\t mat[..., 1, 2] = yZ - wX\n\t mat[..., 2, 0] = xZ - wY\n\t mat[..., 2, 1] = yZ + wX\n\t mat[..., 2, 2] = 1.0 - (xX + yY)\n\t return np.where((Nq > _FLOAT_EPS)[..., np.newaxis, np.newaxis], mat, np.eye(3))", "def quat_mean(quaternions):\n if len(quaternions) <= 0:\n return np.nan\n elif len(quaternions) == 1:\n # Only one quaternion, it is the average of itself\n return quaternions[0]\n elif len(quaternions) == 2:\n # We have weird errors for 2 quaternions using the matrix\n # We use the closed form solution given in\n # https://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/20070017872.pdf\n q1 = np.asarray(quaternions[0])\n q2 = np.asarray(quaternions[1])\n dot = np.dot(q1, q2)\n if dot < 0:\n # The vectors don't have the same handedness, invert one\n q2 = -1 * q2\n dot = -dot\n if dot == 0:\n if q1[0] > q2[0]:\n return q1\n return q2\n z = np.sqrt((q1[0] - q2[2]) * (q1[0] - q2[2]) + 4 * q1[0] * q2[0] * dot * dot)\n result = 2 * q1[0] * dot * q1 + (q2[0] - q1[0] + z) * q2\n return result / np.linalg.norm(result)\n else:\n # Quaternion average from the eigenvectors of the sum matrix\n # See: https://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/20070017872.pdf\n # We have at least 3 quaternions, make sure they're of the same handedness\n q_mat = np.asarray([\n q if np.dot(q, quaternions[0]) > 0 else -1 * np.asarray(q)\n for q in quaternions\n ])\n product = np.dot(q_mat.T, q_mat) # Computes sum([q * q.T for q in quaterions])\n evals, evecs = np.linalg.eig(product)\n best = -1\n result = None\n for idx in range(len(evals)):\n if evals[idx] > best:\n best = evals[idx]\n result = evecs[idx]\n if np.any(np.iscomplex(result)):\n # Mean is complex, which means the quaternions are all too close together (I think?)\n # Instead, return the Mode, the most common quaternion\n counts = [\n sum(1 for q2 in quaternions if np.array_equal(q1, q2))\n for q1 in quaternions\n ]\n best = 0\n for idx in range(len(counts)):\n if counts[idx] > best:\n best = counts[idx]\n result = quaternions[idx]\n print(\"Passing off mode as mean with {0} of {1} 
identical vectors\".format(best, len(quaternions)))\n return result", "def point_rotation_by_quaternion(v, q):\r\n r = [0] + v\r\n q_conj = [q[0], -q[1], -q[2], -q[3]]\r\n return quaternion_product(quaternion_product(q, r), q_conj)[1:]", "def random_attitudes(n: int = 1, representation: str = 'quaternion') -> np.ndarray:\n if not isinstance(n, int):\n raise TypeError(f\"n must be an integer. Got {type(n)}\")\n if n < 1:\n raise ValueError(f\"n must be greater than 0. Got {n}\")\n if not isinstance(representation, str):\n raise TypeError(f\"representation must be a string. Got {type(representation)}\")\n if representation.lower() not in ['rotmat', 'quaternion']:\n raise ValueError(f\"Given representation '{representation}' is NOT valid. Try 'rotmat', or 'quaternion'\")\n u = np.random.random((3, n))\n s1 = np.sqrt(1.0 - u[0])\n s2 = np.sqrt(u[0])\n t1 = 2.0 * np.pi * u[1]\n t2 = 2.0 * np.pi * u[2]\n Q = np.zeros((n, 4))\n Q[:, 0] = s2 * np.cos(t2)\n Q[:, 1] = s1 * np.sin(t1)\n Q[:, 2] = s1 * np.cos(t1)\n Q[:, 3] = s2 * np.sin(t2)\n if n < 2:\n q = Q.flatten()\n q /= np.linalg.norm(q)\n if representation.lower() == 'rotmat':\n return Quaternion(q).to_DCM()\n return q\n Q = Q / np.linalg.norm(Q, axis=1)[:, None]\n if representation.lower() == 'rotmat':\n return QuaternionArray(Q).to_DCM()\n return Q", "def _generate_panel_quaternions(self, et_start: float, et_end: float, step_s: float) -> List[MappsTimedQuaternion]:\n quaternions = []\n ets = range(int(et_start), int(et_end), step_s)\n n = len(ets)\n counter_pct = 0\n for i, et in enumerate(ets):\n if 100 * i / n > counter_pct:\n print(f\"Progress: {counter_pct} %\")\n counter_pct += 10\n JUICE_Y_in_J2000 = spy.spkcpt([0.0, 1.0, 0.0], self.probe, f\"{self.probe}_SPACECRAFT\", et, \"J2000\",\n \"OBSERVER\", \"NONE\", self.probe)[0][0:3]\n JUICE_SUN_in_J2000 = spy.spkpos(\"SUN\", et, \"J2000\", \"LT+S\", self.probe)[0]\n\n new_X, nY = self._find_new_XY_directions(JUICE_Y_in_J2000, JUICE_SUN_in_J2000)\n\n utc_time_string = spy.et2utc(et, \"ISOC\", 0) + \"Z\"\n quaternions.append(MappsTimedQuaternion(utc_time_string, *self._create_quaternion(new_X, nY)))\n\n return quaternions", "def quat2mat(q):\n #leila: https://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToMatrix/index.htm\n\n sz = quat.get_size(q)\n q0 = quat.getq0(q)\n q1 = quat.getq1(q)\n q2 = quat.getq2(q)\n q3 = quat.getq3(q)\n qt = quat.get_type(q)\n\n g = np.zeros((sz, 3, 3))\n g[:, 0, 0] = np.square(q0) + np.square(q1) - np.square(q2) - np.square(q3)\n g[:, 0, 1] = 2*(q1*q2 - q0*q3)\n g[:, 0, 2] = 2*(q3*q1 + q0*q2)\n g[:, 1, 0] = 2*(q1*q2 + q0*q3)\n g[:, 1, 1] = np.square(q0) - np.square(q1) + np.square(q2) - np.square(q3)\n g[:, 1, 2] = 2*(q2*q3 - q0*q1)\n g[:, 2, 0] = 2*(q3*q1 - q0*q2)\n g[:, 2, 1] = 2*(q2*q3 + q0*q1)\n g[:, 2, 2] = np.square(q0) - np.square(q1) - np.square(q2) + np.square(q3)\n\n if sz == 1:\n g = g.reshape((3, 3))\n if qt == -1:\n g = -g\n else:\n inds1 = np.where(qt == -1)\n g[inds1, :, :] = -g[inds1, :, :]\n\n return g", "def to_quaternion(self,roll=0.0, pitch=0.0, yaw=0.0):\n t0 = math.cos(math.radians(yaw * 0.5))\n t1 = math.sin(math.radians(yaw * 0.5))\n t2 = math.cos(math.radians(roll * 0.5))\n t3 = math.sin(math.radians(roll * 0.5))\n t4 = math.cos(math.radians(pitch * 0.5))\n t5 = math.sin(math.radians(pitch * 0.5))\n\n w = t0 * t2 * t4 + t1 * t3 * t5\n x = t0 * t3 * t4 - t1 * t2 * t5\n y = t0 * t2 * t5 + t1 * t3 * t4\n z = t1 * t2 * t4 - t0 * t3 * t5\n\n return [w, x, y, z]", "def get_random_quaternion(self):\n random_angles = 
self.get_random_vector([0,0,0], [2*np.pi, 2*np.pi, 1])\n return tf.transformations.quaternion_from_euler(random_angles[0],\n random_angles[1],\n 0)", "def test_conversions_compact_axis_angle_quaternion():\n q = np.array([1, 0, 0, 0])\n a = pr.compact_axis_angle_from_quaternion(q)\n assert_array_almost_equal(a, np.array([0, 0, 0]))\n q2 = pr.quaternion_from_compact_axis_angle(a)\n assert_array_almost_equal(q2, q)\n\n random_state = np.random.RandomState(0)\n for _ in range(5):\n a = pr.random_compact_axis_angle(random_state)\n q = pr.quaternion_from_compact_axis_angle(a)\n\n a2 = pr.compact_axis_angle_from_quaternion(q)\n assert_array_almost_equal(a, a2)\n\n q2 = pr.quaternion_from_compact_axis_angle(a2)\n pr.assert_quaternion_equal(q, q2)", "def get_euler_frame(quaternionion_frame):\n\n euler_frame = list(quaternionion_frame[:3])\n for quaternion in gen_4_tuples(quaternionion_frame[3:]):\n euler_frame += quaternion_to_euler(quaternion)\n\n return euler_frame", "def get_euler_frame(quaternionion_frame):\n\n euler_frame = list(quaternionion_frame[:3])\n for quaternion in gen_4_tuples(quaternionion_frame[3:]):\n euler_frame += quaternion_to_euler(quaternion)\n\n return euler_frame", "def update_mag(self, mags):\n self.log.mag(mags)\n q = self.quaternion()\n roll, pitch, heading = self.es\n\n mag_inertial = (q * quaternion.Quaternion.from_vec(np.array(mags)) * q.inv()).as_ndarray()[1:]\n mag_inertial[2] = 0\n mag_inertial /= sqrt(mag_inertial[0]**2 + mag_inertial[1]**2)\n mag_body = (q.inv() * quaternion.Quaternion.from_vec(mag_inertial) * q).as_ndarray()[1:]\n\n h = (q.inv() * quaternion.Quaternion.from_vec(np.array([1.0, 0, 0])) * q).as_ndarray()[1:]\n y = np.vstack(mag_body) - np.vstack(h)\n \n H = np.zeros((3, 9))\n ch2 = np.cos(heading/2)\n cr2 = np.cos(roll/2)\n sh2 = np.sin(heading/2)\n sr2 = np.sin(roll/2)\n H[0, 0] = 0\n H[0, 1] = 0\n H[0, 2] = -2.0*ch2*cr2**2*sh2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2) - 2.0*ch2*sh2*sr2**2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2)\n H[1, 0] = 4.0*ch2*cr2*sh2*sr2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2)\n H[1, 1] = 2.0*ch2**2*cr2*sr2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2) - 2.0*cr2*sh2**2*sr2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2)\n H[1, 2] = -1.0*ch2**2*cr2**2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2) + 1.0*ch2**2*sr2**2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2) + 1.0*cr2**2*sh2**2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2) - 1.0*sh2**2*sr2**2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2)\n\n S = H.dot(self.P).dot(H.T) + self.Rmag\n K = self.P.dot(H.T).dot(np.linalg.inv(S))\n x = self.state_vec() + K.dot(y)\n\n self.P = (np.eye(9) - K.dot(H)).dot(self.P)\n self.set_state_vec(x)", "def quatLeftMat(q):\n\ts = q[0]\n\tv = q[1:].reshape(-1,)\n\tL = np.zeros((4, 4))\n\tL[0, 0] = s\n\tL[0, 1:] = -v\n\tL[1:, 0] = v\n\tL[1:, 1:] = s*np.eye(3) + skewMat(v)\n\treturn L", "def _get_quaternion_data(self, msg):\n alpha, beta, gamma = PIDController.get_euler_angle_from_quat(msg.quaternion.w, msg.quaternion.x,\n msg.quaternion.y, msg.quaternion.z)\n self._actual_euler[\"alpha\"], self._actual_euler[\"beta\"], self._actual_euler[\"gamma\"] \\\n = alpha, beta, gamma", "def test_interpolate_quaternion():\n n_steps = 10\n 
random_state = np.random.RandomState(0)\n a1 = pr.random_axis_angle(random_state)\n a2 = pr.random_axis_angle(random_state)\n q1 = pr.quaternion_from_axis_angle(a1)\n q2 = pr.quaternion_from_axis_angle(a2)\n\n traj_q = [pr.quaternion_slerp(q1, q2, t)\n for t in np.linspace(0, 1, n_steps)]\n traj_R = [pr.matrix_from_quaternion(q) for q in traj_q]\n R_diff = np.diff(traj_R, axis=0)\n R_diff_norms = [np.linalg.norm(Rd) for Rd in R_diff]\n assert_array_almost_equal(R_diff_norms,\n R_diff_norms[0] * np.ones(n_steps - 1))" ]
[ "0.5886946", "0.5821129", "0.57884336", "0.5761411", "0.57510763", "0.56554824", "0.55509216", "0.5486164", "0.5485171", "0.5455863", "0.5419014", "0.5404133", "0.53984076", "0.5394962", "0.53919274", "0.5354592", "0.5353793", "0.5328789", "0.5326809", "0.5319791", "0.5303704", "0.5268657", "0.5208985", "0.5182829", "0.5171307", "0.5151622", "0.5137279", "0.5136986", "0.51343375", "0.5120013", "0.51113015", "0.51105934", "0.5108458", "0.5103988", "0.5095308", "0.50909436", "0.508318", "0.5079425", "0.5078169", "0.5069094", "0.5045954", "0.5033001", "0.5031777", "0.5012954", "0.49907875", "0.4985817", "0.49852434", "0.4969022", "0.49540532", "0.49499", "0.494933", "0.49485347", "0.4942533", "0.4939513", "0.4937445", "0.49134493", "0.49133557", "0.49110278", "0.4910977", "0.4898087", "0.48916262", "0.48875773", "0.4886824", "0.48811623", "0.48712456", "0.48673823", "0.4863673", "0.48635793", "0.48518792", "0.48433328", "0.48401964", "0.48348087", "0.4833206", "0.4831726", "0.48294997", "0.48256922", "0.48033243", "0.479905", "0.47919294", "0.47911605", "0.47891173", "0.47872657", "0.47849822", "0.4780648", "0.47716597", "0.47589058", "0.47524852", "0.4752352", "0.47455198", "0.47373128", "0.47364673", "0.4731773", "0.4731083", "0.47290683", "0.4728236", "0.4728236", "0.47142214", "0.4712336", "0.47114968", "0.47108915" ]
0.47717264
84
Search the corresponding page
def extract_table(path): re_ex = RE_EX pages = [] page_num = 1 with open(path, 'rb') as in_file: parser = PDFParser(in_file) doc = PDFDocument(parser) for page in PDFPage.create_pages(doc): rsrcmgr = PDFResourceManager() output_string = StringIO() device = TextConverter(rsrcmgr, output_string, laparams=LAParams()) interpreter = PDFPageInterpreter(rsrcmgr, device) interpreter.process_page(page) finder = re.search(re_ex, output_string.getvalue(), re.IGNORECASE) print('Searching table', '\tCurrent page:', page_num) if finder: print('Table finded.') pages.append(page_num) break page_num += 1 table = extract_text(path, pages) table = isolate(table) table = add_separations(table) return table
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search_page(request):\n if request.method == \"GET\":\n page = request.GET.get('q')\n entries = util.list_entries()\n entries_set=set(entries)\n\n if page in entries_set:\n return render(request, \"encyclopedia/visit_entry.html\",{\n \"entry\": util.get_entry(page),\n \"title\": page\n })\n \n else:\n results = list(filter(lambda x: page in x, entries))\n return render(request, \"encyclopedia/search_page.html\",{\n \"results\": results\n })", "def search(self, query):", "def page12(self):\n self.token_query = \\\n 'search'\n result = request1201.POST('/Cars_Sample_App/search.do' +\n '?query=' +\n self.token_query,\n ( NVPair('criteria', 'Bobble'),\n NVPair('x', '57'),\n NVPair('y', '5'), ),\n ( NVPair('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),\n NVPair('Content-Type', 'application/x-www-form-urlencoded'),\n NVPair('Referer', 'http://supercars-tomcat:8080/Cars_Sample_App/search.do?query=search'), ))\n\n return result", "def search():\n pass", "def page11(self):\n self.token_query = \\\n 'search'\n result = request1101.POST('/Cars_Sample_App/search.do' +\n '?query=' +\n self.token_query,\n ( NVPair('criteria', 'Aston'),\n NVPair('x', '46'),\n NVPair('y', '19'), ),\n ( NVPair('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),\n NVPair('Content-Type', 'application/x-www-form-urlencoded'),\n NVPair('Referer', 'http://supercars-tomcat:8080/Cars_Sample_App/search.do'), ))\n self.token_query = \\\n httpUtilities.valueFromBodyURI('query') # 'car'\n # 3 different values for token_cid found in response; the first matched\n # the last known value of token_cid - don't update the variable.\n\n grinder.sleep(95)\n request1102.GET('/Cars_Sample_App/images/cars/1.jpg', None,\n ( NVPair('Accept', '*/*'),\n NVPair('Referer', 'http://supercars-tomcat:8080/Cars_Sample_App/search.do?query=search'), ))\n\n return result", "def search(query_string):", "def _search(dork): \n retVal = [] \n paths = [] \n\n if not dork: \n return None \n\n headers = {} \n\n headers[HTTP_HEADER.USER_AGENT] = dict(conf.httpHeaders).get(HTTP_HEADER.USER_AGENT, DUMMY_SEARCH_USER_AGENT) \n headers[HTTP_HEADER.ACCEPT_ENCODING] = HTTP_ACCEPT_ENCODING_HEADER_VALUE \n\n gpage = conf.googlePage if conf.googlePage > 1 else 1 \n\n#polluted by xi4okv QQ£º48011203 \n\n for gpage in xrange(1,10): \n logger.info(\"using search result page #%d\" % gpage) \n\n url = \"https://m.baidu.com/s?\" \n url += \"word=%s&\" % urlencode(dork, convall=True) \n url += \"&pn=%d\" % ((gpage - 1) * 10) \n\n try: \n req = urllib2.Request(url, headers=headers) \n conn = urllib2.urlopen(req) \n\n requestMsg = \"HTTP request:\\nGET %s\" % url \n requestMsg += \" %s\" % httplib.HTTPConnection._http_vsn_str \n logger.log(CUSTOM_LOGGING.TRAFFIC_OUT, requestMsg) \n\n page = conn.read() \n code = conn.code \n status = conn.msg \n\n responseHeaders = conn.info() \n page = decodePage(page, responseHeaders.get(\"Content-Encoding\"), responseHeaders.get(\"Content-Type\")) \n #print page \n\n responseMsg = \"HTTP response (%s - %d):\\n\" % (status, code) \n\n if conf.verbose <= 4: \n responseMsg += getUnicode(responseHeaders, UNICODE_ENCODING) \n elif conf.verbose > 4: \n responseMsg += \"%s\\n%s\\n\" % (responseHeaders, page) \n\n logger.log(CUSTOM_LOGGING.TRAFFIC_IN, responseMsg) \n except urllib2.HTTPError, e: \n pass \n\n urls = [urllib.unquote(match.group(0) or match.group(1)) for match in re.finditer(GOOGLE_REGEX, page, re.I)] \n #retVal = re.findall(GOOGLE_REGEX, page, re.I) \n\n import urlparse \n\n for 
url in urls: \n urls_pat = re.compile(r\"http://(.*)[^']\") \n aurl = re.findall(urls_pat, url) \n if \"?\" in url and \"baidu\" not in url: \n xpath = urlparse.urlparse(url).path \n if xpath not in paths: \n paths.append(xpath) \n retVal.append(aurl[0]) \n\n #print retVal \n\n return retVal", "def search_for(self,position,page_no):\n\t\tbot = self.bot\n\t\tcounter = 1\n\t\tunique_links = []\n\t\twhile page_no >= counter:\n\t\t\tstr_counter = str(counter)\n\t\t\tsearch_url = bot.get(URL+'search/results/people/?keywords='+position+'&origin=SWITCH_SEARCH_VERTICAL&page='+str_counter)\n\t\t\tprofiles = bot.find_elements_by_xpath(\"//a[@data-control-name='search_srp_result']\")\n\n\t\t\tfor person in profiles:\n\t\t\t\tlink = person.get_attribute('href')\n\t\t\t\tif link not in unique_links:\n\t\t\t\t\tunique_links.append(link)\n\n\t\t\tcounter += 1\n\t\t\ttime.sleep(5)\n\t\treturn unique_links", "def search(self, term):", "def page10(self):\n result = request1001.GET('/Cars_Sample_App/search.do', None,\n ( NVPair('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),\n NVPair('Referer', 'http://supercars-tomcat:8080/Cars_Sample_App/car.do?query=carEnquiries&cid=2'), ))\n\n return result", "def search(self, query, maxhits=100):", "def get_cuisine_search_pages(cuisine, page):\n link = SEARCH_URL.format(page, cuisine)\n cuisine_recipe_links = get_content_from_dynamic_url(link)\n if not cuisine_recipe_links:\n print \"no content for:\", link\n return None\n soup_search = BeautifulSoup(cuisine_recipe_links)\n return soup_search.find_all(\"h2\", {\"class\": \"node-title\"})", "def processSearchResult(self):", "def search(request):\n\n # get form data \n searchItem = request.GET.get(\"q\")\n # if searchItem is an exact match redirect to that page\n if (util.get_entry(searchItem) is not None):\n return HttpResponseRedirect(reverse(\"entry\", kwargs={\n \"title\": searchItem\n }))\n # add any pages with the string in it to results list \n else: \n results = []\n substring = False\n for title in util.list_entries():\n if searchItem.upper() in title.upper():\n results.append(title)\n if results:\n substring = True\n # return results\n return render(request, \"encyclopedia/search.html\", {\n \"searchItem\": searchItem,\n \"substring\": substring,\n \"results\": results\n })", "def search_page():\n return render_template('page_query.html', search_label=g_search_type)", "def search(self, *args, **kwargs):", "def search(request):\n title = \"Voices search\"\n search_term = request.params.get('search_term','')\n form = Form(request)\n searchstring = u'%%%s%%' % search_term\n\n # generic_filter can be applied to all Node (and subclassed) objects\n\n generic_filter = or_(\n Content.title.like(searchstring),\n Content.body.like(searchstring),\n )\n\n results = DBSession.query(Content).filter(Content.type !='listing').filter(generic_filter).\\\n order_by(Content.title.asc()).all()\n\n\n page_url = PageURL_WebOb(request)\n page = int(request.params.get(\"page\", 1))\n paginator = Page(results,\n page=page,\n items_per_page=10,\n url=page_url)\n\n return render_to_response(\"buddy:templates/home/searchresult.mako\",\n dict(paginator=paginator,title=title,\n form=FormRenderer(form)),request=request)", "def search(request):\n raise NotImplementedError", "def search(self, q):\n self.__query = q\n self.scrape_page()", "def traverse_search_pages(self):\n self.wait_for_ajax()\n self.locator_finder_by_hover_item(self.move_second_page_id)\n time.sleep(2)\n self.wait_for_ajax()\n 
self.locator_finder_by_hover_item(self.move_first_page_id)\n time.sleep(2)", "def search_wikipedia(self):\n self.summary.delete('1.0', tk.END)\n possibilities = wk.search(self.search_box.get('1.0',tk.END).replace(\"\\n\",\"\"))\n if len(possibilities) > 0:\n try:\n p = wk.page(possibilities[0])\n except wk.DisambiguationError as e:\n p = wk.page(e.options[0])\n self.summary.configure(state=\"normal\")\n self.summary.delete('1.0', tk.END)\n self.summary.insert('1.0', p.summary)\n self.summary.configure(state=\"disabled\")\n self.p = p\n self.update_category_map(p.categories)\n self.get_news()\n return None", "def do_search(self):\n # Call the website\n self.driver.get(self.BASE_URL)\n\n # Request the proper historical data\n self.select_proper_version()\n self.save_image()\n\n # If the entity exists in this historical version, extract the data\n if self.select_proper_region() is True:\n # Do the search\n self.fill_search_parameters()\n # Scrap the results page\n self.scrap_results()\n # Export the data to .csv\n self.search_results.export()", "def __aux_search(self, url, page_limit):\n info = list()\n count = 1\n while True:\n try:\n print(\"[+] Getting page {} result\".format(count))\n if page_limit >= count:\n jdata, response = get_response(url, apikey=self.apikey, params=self.params)\n count += 1\n if jdata and 'data' in jdata:\n info += jdata['data']\n if response and jdata.get('links', {}).get('next', '') != response.url:\n url = jdata['links']['next']\n else:\n break\n else:\n break\n except Exception as e:\n print(e)\n count += 1\n if page_limit >= count:\n break\n\n return info", "def search(self, search):\n raise NotImplementedError", "def query(self, page) -> [str, dict]:", "def search_results():\n search = False\n if session['patron']:\n search = False\n try:\n page = int(request.args.get('page', 1))\n except ValueError:\n page = 1\n\n search_criteria = request.args.get('search')\n patron_id = session['patron']\n session['search_criteria'] = search_criteria\n\n if search_criteria != '':\n print \"do a search\"\n list_of_books = booksearch.search(search_criteria, patron_id)\n pagination = Pagination(page=page, \n total=len(list_of_books), \n search=search, \n record_name='list_of_books')\n return render_template('book_list.html', search=search_criteria,\n list_of_books=list_of_books,\n pagination=pagination,\n )\n else:\n flash(\"Please enter an author or a title.\")\n return render_template('index.html')", "def _search(q: str, n: int):\n return search_client.retrieve([q], n)[0]", "def redirect_search(self, response):\n hxs = HtmlXPathSelector(response)\n\n yield Request(\n url=search_url,\n dont_filter=True,\n headers=self.headers,\n callback=self.parse_search\n )", "def search(self, key, headers=Headers()):", "def search_current_auctions(request):\n query = request.GET.get('q')\n auction = Auction.objects.all()\n\n if query:\n results = auction.filter(Q(antiques__name__icontains=query) | Q(antiques__description__icontains=query))\n\n else:\n results = Auction.objects.all()\n\n pages = pagination(request, results, num=4)\n context = {\n 'items': pages[0],\n 'page_range': pages[1]\n }\n\n return render(request, \"showallauctions.html\", context)", "def search():\n url = create_search_url()\n links = make_selenium_search(url)\n\n return links", "def search():\n # Get search request from GET requst\n query = str(request.query.q)\n print('Search query: ' + query)\n \n # Log to file\n try:\n with open(\"log.txt\", \"a\") as file:\n file.write(query + \"\\n\")\n except:\n print(\"Error 
saving log.txt!\")\n \n ## Search in the database\n search = es.search(\n index='webpages',\n doc_type='webpage',\n body={\n 'size': 25,\n \"fields\" : [\"title\", \"url\", \"description\"],\n 'query': {\n \"multi_match\" : {\n \"query\" : query,\n \"fields\" : [\"title^3\", \"url^5\", \"description^2\", \"content\"]\n }\n },\n \"highlight\" : {\n \"fields\" : {\n \"content\" : {\n \"pre_tags\" : [\"<b>\"],\n \"post_tags\" : [\"</b>\"],\n \"order\": \"score\",\n \"index_options\" : \"offsets\",\n \"fragment_size\" : 220,\n \"number_of_fragments\" : 1,\n \"require_field_match\" : \"false\"\n }\n }\n }\n }\n )\n\n ## Work through the results\n # Number of hits\n hits = search['hits']['total']\n\n # No points in continuing if there are no results..\n if hits == 0:\n return {'hits': 0}\n \n # Array containing results\n results = search['hits']['hits']\n\n cleanResults = list()\n \n # The 'results' array contain a lot of \"useless\" data,\n # here we work through it, and strip it down to the minimum\n for result in results:\n url = result['fields']['url']\n title = result['fields']['title']\n\n # If highlighting in the page body is available, set description to the highlighted paragraph\n # If no highlighting available, set the description to the description of the page (from its <meta> tag)\n try:\n description = result['highlight']['content']\n except:\n description = result['fields']['description']\n\n # Add the search result to the 'cleanResults' list\n cleanResults.append({\n 'url': url,\n 'title': title,\n 'description': description\n })\n ## Freebase\n # Try searching freebase for topics related to our query\n try:\n fb = freebase(query)\n except:\n # If topic doesnt exist in freebase, set fb = false\n # In the JavaScript, we can easily check if 'freebase == false'\n fb = False\n \n # Construct response\n response = {\n 'hits': hits,\n 'results': cleanResults,\n 'freebase': fb\n }\n\n return response", "def page13(self):\n result = request1301.GET('/Cars_Sample_App/sell.do', None,\n ( NVPair('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),\n NVPair('Referer', 'http://supercars-tomcat:8080/Cars_Sample_App/search.do?query=search'), ))\n\n return result", "def navigate_search_results(self):\n driver = self.driver\n search_results_exhausted = False\n results_page = self.results_page\n delay = 60\n date = get_date_time()\n # css elements to view job pages\n list_element_tag = '/descendant::a[@class=\"job-title-link\"]['\n print_num_search_results(driver, self.keyword, self.location)\n # go to a specific results page number if one is specified\n go_to_specific_results_page(driver, delay, results_page)\n results_page = results_page if results_page > 1 else 1\n\n while not search_results_exhausted:\n for i in range(1,26): # 25 results per page\n # define the css selector for the blue 'View' button for job i\n job_selector = list_element_tag + str(i) + ']'\n if search_suggestion_box_is_present(driver, \n job_selector, i, results_page):\n continue\n # wait for the selector for the next job posting to load.\n # if on last results page, then throw exception as job_selector \n # will not be detected on the page\n if not link_is_present(driver, delay, \n job_selector, i, results_page):\n continue\n robust_wait_for_clickable_element(driver, delay, job_selector)\n extract_transform_load(driver,\n delay,\n job_selector,\n date,\n self.keyword,\n self.location,\n self.filename)\n # attempt to navigate to the next page of search results\n # if the link is not present, then the 
search results have been \n # exhausted\n try:\n next_results_page(driver, delay)\n print(\"\\n**************************************************\")\n print(\"\\n\\n\\nNavigating to results page {}\" \\\n \"\\n\\n\\n\".format(results_page + 1))\n except ValueError:\n search_results_exhausted = True\n print(\"**************************************************\")\n print(\"\\n\\n\\n\\n\\nSearch results exhausted\\n\\n\\n\\n\\n\")\n else:\n results_page += 1", "def wikidata_search(request, str):\n url_head = 'https://www.wikidata.org/w/api.php?action=wbsearchentities&search='\n url_tail = '&language=en&format=json'\n if request.method == 'GET':\n r = requests.get(url_head+str+url_tail);\n return Response(r.json()['search'])\n #print r", "def search_item(search_term, next=False, page=0, board=0):\n if next == False:\n page = requests.get(\"https://www.nairaland.com/search?q=\" + urllib.parse.quote_plus(str(search_term)) + \"&board=\"+str(board))\n else:\n page = requests.get(\"https://www.nairaland.com/search/\"\n + str(search_term) + \"/0/\"+str(board)+\"/0/1\" + str(page))\n soup = BeautifulSoup(page.content, 'html.parser')\n\n comments = soup.findAll(\"div\", {\"class\": \"narrow\"})\n\n return comments", "def search():\n student_to_find=request.args.get(\"student\", None)\n print(f\"A buscar: {student_to_find}\")\n student_list=search_student(student_to_find)\n return render_template(\"search.html\",student_list_result=student_list)", "def main(url, MY_OUTWARD_TIME_MINI, MY_OUTWARD_TIME_MAXI=\"23:59\"):\n MY_OUTWARD_TIME_MINI = MY_OUTWARD_TIME_MINI.replace(\"h\", \":\")\n MY_OUTWARD_TIME_MAXI = MY_OUTWARD_TIME_MAXI.replace(\"h\", \":\")\n # Create the web browser object\n b = RB(history=True, allow_redirects=True)\n # Open the page\n b.open(url)\n # Find the next page to go\n res = str(b.select(\"#url_redirect_proposals\")[0])\n\n # # - First solution: manual search\n # offset = 4 + res.index('hid=')\n # length = 3\n # key = res[offset: offset + length]\n # print(\"key =\", key)\n # next_url = url1 + str(key)\n # print(\"1. Next url =\", next_url)\n # - Second solution: search with a regexp\n m = url_finder.search(res)\n next_url = m.string[m.start() : m.end()]\n print(\"Next url =\", next_url, \"...\")\n # Follow this url\n b.open(next_url)\n # Get the data.query part\n script = b.select(\"#vsc-preloaded-data-snippet\")[0]\n content = script.contents[0]\n\n # 1. Search for the query to display it nicely again\n m = query_finder.search(content)\n jsontext = m.string[m.start() : m.end()]\n # print(jsontext)\n beginning = \"data.query = JSON.parse('\"\n end = \"');\"\n query = jsontext[len(beginning) : -len(end)]\n jsonrawstr = query.replace(r\"\\\"\", '\"').replace(r\"\\'\", \"'\") # \\\" > \", \\' > '\n # print(jsonrawstr)\n jsonobj = json.loads(jsonrawstr)\n # print(json.dumps(jsonobj, sort_keys=True, indent=4))\n\n # 2. Search for the result\n m = searchResponse_finder.search(content)\n jsontext = m.string[m.start() : m.end()]\n # print(jsontext)\n beginning = \"data.searchResponse = JSON.parse('\"\n end = \"');\"\n searchResponse = jsontext[len(beginning) : -len(end)]\n # print(searchResponse)\n jsonrawstr = searchResponse.replace(r\"\\\"\", '\"').replace(\n r\"\\'\", \"'\"\n ) # \\\" > \", \\' > '\n # print(jsonrawstr)\n jsonobj = json.loads(jsonrawstr)\n # print(json.dumps(jsonobj, sort_keys=True, indent=4))\n\n \"\"\"\n with open('output.json', 'w+') as f:\n json.dump(jsonobj, f, sort_keys=True, indent=4)\n \"\"\"\n\n # 3. 
Affichage des horaires\n print(\"\\nDifferents horaires :\")\n horaires = [i[\"departureDate\"] for i in jsonobj[\"trainProposals\"]]\n print(horaires)\n for number, h in enumerate(horaires):\n print(\"Pour un train partant a :\", h)\n prices = jsonobj[\"trainProposals\"][number][\"priceProposals\"]\n if len(prices) > 0:\n prix = prices[0][\"amount\"]\n print(\"\\tPrix TGV minimum\", \"=\", prix, \"euros.\")\n else:\n print(\"\\tTrain complet.\")", "def test_search_1200(self):\n self.driver.get(self.domain)\n self.driver.maximize_window()\n self.assertTrue(u'TITLE' in\n self.driver.page_source, 'Title text not found')\n search = self.driver.find_element_by_css_selector(\"#XXXX\")\n wait = ui.WebDriverWait(self.driver, 5)\n search.clear()\n search.send_keys(\"XXXX\")\n search.submit()\n try:\n wait.until(lambda driver: u\"XXXX\" in\n self.driver.find_element_by_css_selector(\"xxxx > a\").text,\n 'Not found!')\n except:\n current_url = self.driver.current_url\n resp = requests.get(current_url)\n if resp.status_code != 200:\n raise Exception(\"Search failed! => [%s] %s\" % (resp.status_code,\n current_url))", "def test_search_720(self):\n self.driver.get(self.domain)\n self.assertTrue(u'XXXX' in\n self.driver.page_source, 'Title text not found')\n search = self.driver.find_element_by_css_selector(\"#XXXX\")\n wait = ui.WebDriverWait(self.driver, 5)\n search = self.driver.find_element_by_css_selector(\"#XXXX\")\n search.click()\n search_field = self.driver.find_element_by_css_selector(\"#XXXX\")\n search_field.send_keys(\"XXXX\")\n search_field.submit()\n try:\n wait.until(lambda driver: u\"XXXX\" in\n self.driver.find_element_by_css_selector(\"xxxx > a\").text,\n 'Not found!')\n except:\n current_url = self.driver.current_url\n resp = requests.get(current_url)\n if resp.status_code != 200:\n raise Exception(\"Search failed! 
=> [%s] %s\" % (resp.status_code,\n current_url))", "def search_results(request):\r\n mdict = request.matchdict\r\n rdict = request.GET\r\n\r\n if 'terms' in mdict:\r\n phrase = \" \".join(mdict['terms'])\r\n else:\r\n phrase = rdict.get('search', '')\r\n\r\n if rdict.get('search_mine') or 'username' in mdict:\r\n with_user = True\r\n else:\r\n with_user = False\r\n\r\n username = None\r\n if with_user:\r\n if 'username' in mdict:\r\n username = mdict.get('username')\r\n elif request.user and request.user.username:\r\n username = request.user.username\r\n\r\n # with content is always in the get string\r\n search_content = asbool(rdict.get('with_content', False))\r\n\r\n conn_str = request.registry.settings.get('sqlalchemy.url', False)\r\n searcher = get_fulltext_handler(conn_str)\r\n\r\n # check if we have a page count submitted\r\n page = rdict.get('page', 0)\r\n count = rdict.get('count', 10)\r\n\r\n try:\r\n res_list = searcher.search(\r\n phrase,\r\n content=search_content,\r\n username=username if with_user else None,\r\n ct=count,\r\n page=page\r\n )\r\n except ValueError:\r\n request.response.status_int = 404\r\n ret = {'error': \"Bad Request: Page number out of bound\"}\r\n return _api_response(request, ret)\r\n\r\n constructed_results = []\r\n for res in res_list:\r\n return_obj = dict(res)\r\n return_obj['tags'] = [dict(tag[1]) for tag in res.tags.items()]\r\n\r\n # the hashed object is there as well, we need to pull the url and\r\n # clicks from it as total_clicks\r\n return_obj['url'] = res.hashed.url\r\n return_obj['total_clicks'] = res.hashed.clicks\r\n\r\n constructed_results.append(return_obj)\r\n\r\n return _api_response(request, {\r\n 'search_results': constructed_results,\r\n 'result_count': len(constructed_results),\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'with_content': search_content,\r\n 'username': username,\r\n })", "def search_url(url):\r\n\r\n surl = url + '/search-middle-ns.asp'\r\n driver.get(surl)\r\n\r\n return", "def search():\r\n return render_template(\"/home/search.html\")", "def search(request):\n\n term = \"\"\n organizations = None\n memberships = None\n events = None\n persons = None\n airports = None\n training_requests = None\n comments = None\n only_result = None\n\n if request.method == \"GET\" and \"term\" in request.GET:\n form = SearchForm(request.GET)\n if form.is_valid():\n term = form.cleaned_data.get(\"term\", \"\")\n tokens = re.split(r\"\\s+\", term)\n\n organizations = Organization.objects.filter(\n Q(domain__icontains=term) | Q(fullname__icontains=term)\n ).order_by(\"fullname\")\n if len(organizations) == 1 and not only_result:\n only_result = organizations[0]\n\n memberships = Membership.objects.filter(\n registration_code__icontains=term\n ).order_by(\"-agreement_start\")\n if len(memberships) == 1 and not only_result:\n only_result = memberships[0]\n\n events = Event.objects.filter(\n Q(slug__icontains=term)\n | Q(host__domain__icontains=term)\n | Q(host__fullname__icontains=term)\n | Q(url__icontains=term)\n | Q(contact__icontains=term)\n | Q(venue__icontains=term)\n | Q(address__icontains=term)\n ).order_by(\"-slug\")\n if len(events) == 1 and not only_result:\n only_result = events[0]\n\n # if user searches for two words, assume they mean a person\n # name\n if len(tokens) == 2:\n name1, name2 = tokens\n complex_q = (\n (Q(personal__icontains=name1) & Q(family__icontains=name2))\n | (Q(personal__icontains=name2) & Q(family__icontains=name1))\n | Q(email__icontains=term)\n | Q(secondary_email__icontains=term)\n | 
Q(github__icontains=term)\n )\n persons = Person.objects.filter(complex_q)\n else:\n persons = Person.objects.filter(\n Q(personal__icontains=term)\n | Q(family__icontains=term)\n | Q(email__icontains=term)\n | Q(secondary_email__icontains=term)\n | Q(github__icontains=term)\n ).order_by(\"family\")\n\n if len(persons) == 1 and not only_result:\n only_result = persons[0]\n\n airports = Airport.objects.filter(\n Q(iata__icontains=term) | Q(fullname__icontains=term)\n ).order_by(\"iata\")\n if len(airports) == 1 and not only_result:\n only_result = airports[0]\n\n training_requests = TrainingRequest.objects.filter(\n Q(group_name__icontains=term)\n | Q(family__icontains=term)\n | Q(email__icontains=term)\n | Q(github__icontains=term)\n | Q(affiliation__icontains=term)\n | Q(location__icontains=term)\n | Q(user_notes__icontains=term)\n )\n if len(training_requests) == 1 and not only_result:\n only_result = training_requests[0]\n\n comments = Comment.objects.filter(\n Q(comment__icontains=term)\n | Q(user_name__icontains=term)\n | Q(user_email__icontains=term)\n | Q(user__personal__icontains=term)\n | Q(user__family__icontains=term)\n | Q(user__email__icontains=term)\n | Q(user__github__icontains=term)\n ).prefetch_related(\"content_object\")\n if len(comments) == 1 and not only_result:\n only_result = comments[0]\n\n # only 1 record found? Let's move to it immediately\n if only_result and not form.cleaned_data[\"no_redirect\"]:\n msg = format_html(\n \"You were moved to this page, because your search <i>{}</i> \"\n \"yields only this result.\",\n term,\n )\n if isinstance(only_result, Comment):\n messages.success(request, msg)\n return redirect(\n only_result.content_object.get_absolute_url()\n + \"#c{}\".format(only_result.id)\n )\n elif hasattr(only_result, \"get_absolute_url\"):\n messages.success(request, msg)\n return redirect(only_result.get_absolute_url())\n\n else:\n messages.error(request, \"Fix errors below.\")\n\n # if empty GET, we'll create a blank form\n else:\n form = SearchForm()\n\n context = {\n \"title\": \"Search\",\n \"form\": form,\n \"term\": term,\n \"organisations\": organizations,\n \"memberships\": memberships,\n \"events\": events,\n \"persons\": persons,\n \"airports\": airports,\n \"comments\": comments,\n \"training_requests\": training_requests,\n }\n return render(request, \"dashboard/search.html\", context)", "def search(self, *args, **kwargs): # real signature unknown\n pass", "def go_search(self, driver, pid):\n return [self.search_url(website, pid)]", "def get_page(search):\n headers = {\n \"User-Agent\":\n \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:42.0) Gecko/20100101 Firefox/42.0\",\n }\n url = 'http://google.com/search?h1=en&q=' + search + \"&meta=&gws_rd=ssl\"\n page = requests.get(url, headers=headers)\n return page", "def search():\n return {\n \"status\": \"UP\",\n }, 200", "def get_search_results(self):\n sleep(10)\n try:\n addresses = self.driver.find_elements_by_class_name('details-title')\n for p in range(len(addresses)):\n address.append(addresses[p].text)\n prices = self.driver.find_elements_by_class_name('price-info')\n for p in range(len(prices)):\n price.append(prices[p].text)\n links = self.driver.find_element_by_tag_name('a.details-titleLink jsCardLinkGA')\n for p in range(len(links)):\n link.append(links[p].text)\n except NoSuchElementException:\n sleep(3)\n self.pop_up()", "def search_book():\n\n title = request.form.get(\"search\")\n books = book_search_results(GR_KEY, title)\n acct = get_current_account(session['acct'])\n search = 
True\n\n return render_template(\"index.html\", books=books, acct=acct, search=search)", "def fetch(url):\r\n PAGES = {\"http://SEARCH_QUERY_URL?&page=1\" : SEARCH_RESULT_PAGE1,\r\n \"http://SEARCH_QUERY_URL?&page=2\" : SEARCH_RESULT_PAGE2} \r\n return PAGES[url]", "def find(self):\n if self.start >= SMAX:\n return 4\n link = \"http://www.google.com/search?q={}&start={}\".format(self.qon[1], self.start)\n try:\n fobj = self.opener.open(link)\n except HTTPError:\n self.update(\"Google banned you.\", \"\")\n return 3\n except timeout:\n self.update(\"Timed out or Google banned you.\", \"\")\n return 3\n else:\n data = fobj.read() # google's source\n fobj.close()\n # find a relevant closest position to the link\n index1 = data.find(self.first)\n if index1 == -1: # no results in page or modified pattern\n return 1 # invalid source\n self.start += 1 # now do the increment\n index1 += len(self.first)\n index2 = data.find(self.second, index1)\n url = data[index1:index2]\n # edit url\n newurl = \"\"\n i = 0\n length = len(url)\n while i < length:\n if url[i] == \"%\":\n char = chr(int(url[i + 1] + url[i + 2], 16))\n i += 2\n else:\n char = url[i]\n newurl += char\n i += 1\n url = newurl\n # process it\n if url in self.seen: # link already visited\n return 2\n self.seen.add(url)\n upo = urlparse(url)\n self.update(\"Looking in %s...\" % upo.netloc, \"\")\n try:\n fobj = self.opener.open(url)\n except URLError:\n self.update(\"Invalid link.\", \"\")\n return 2\n except timeout:\n self.update(\"Timed out.\", \"\")\n return 3\n else:\n self.data = fobj.read()\n self.dataLen = len(self.data)\n fobj.close()\n return 0 # all fine", "def search(self, value):\n self.base_selenium.set_text(element='general:search', value=value)\n self.base_selenium.click(element='general:search')\n time.sleep(self.base_selenium.TIME_MEDIUM)\n return self.result_table()", "async def search(self, *args, **kwargs):\n pass", "def search_main() -> None:\n\n logger.info(\"Starting search\")\n links = run_search(grab_search_info())\n if links:\n logger.info(\"Printing links\")\n for key in links:\n print(f\"{key.upper()}: {links[key]}\")", "def search(self, address='', url=True):\n baseurl = 'https://www.redfin.com/'\n try:\n self.driver.get(baseurl)\n if not address:\n print(f'---- testing {self.driver.current_url}')\n return None\n search_input = self.driver.find_element_by_xpath(\n '//input[@type=\"search\"]')\n search_input.send_keys(address)\n search_btn = self.driver.find_element_by_xpath(\n '//button[@data-rf-test-name=\"searchButton\"]')\n search_btn.click()\n self.driver.find_element_by_xpath(\n '//span[@itemprop=\"streetAddress\"]')\n result = self.driver.current_url\n self.detail_statu = True\n self.log.debug('---- Property page : %s', result)\n if url:\n return result\n except NoSuchElementException as e:\n self.log.info('---- No such element for : \"%s\"', address)\n return None\n except Exception as e:\n self.log.error('---- Search Error : %s', e)\n result = 'None'\n if url:\n return result", "def _search(client, search_string):\n if search_string is None:\n logger.info(uxstring.UxString.list_all, fg=\"green\")\n\n current_page = 0\n total_pages = get_search_results(client, search_string, current_page)\n if total_pages < 1:\n return\n\n while 0 <= current_page < total_pages:\n try:\n prompt_resp = click.prompt(uxstring.UxString.pagination,\n type=str)\n next_page = get_next_page(prompt_resp, current_page)\n if next_page == -1:\n model_id = prompt_resp\n display_search_info(client, model_id)\n elif next_page >= 
total_pages or next_page < 0:\n continue\n elif next_page != current_page:\n get_search_results(client, search_string, next_page)\n current_page = next_page\n\n except click.exceptions.Abort:\n return", "def go_to_search():\n\tuser_id = session.get(\"user_id\")\n\tuser = User.query.filter_by(user_id=user_id).first()\n\n\treturn render_template(\"/nowsearch.html\", user=user)", "def search_results(self):\r\n route_name = self.request.matched_route.name\r\n mdict = self.matchdict\r\n rdict = self.GET\r\n\r\n if 'terms' in mdict:\r\n phrase = \" \".join(mdict['terms'])\r\n else:\r\n phrase = rdict.get('search', '')\r\n\r\n # Always search the fulltext content\r\n with_content = True\r\n\r\n conn_str = self.settings.get('sqlalchemy.url', False)\r\n searcher = get_fulltext_handler(conn_str)\r\n\r\n # check if we have a page count submitted\r\n params = self.params\r\n page = params.get('page', 0)\r\n count = params.get('count', 50)\r\n\r\n if rdict.get('search_mine') or 'username' in mdict:\r\n with_user = True\r\n else:\r\n with_user = False\r\n\r\n username = None\r\n if with_user:\r\n if 'username' in mdict:\r\n username = mdict.get('username')\r\n elif self.request.user and self.request.user.username:\r\n username = self.request.user.username\r\n\r\n res_list = searcher.search(\r\n phrase,\r\n content=with_content,\r\n username=username if with_user else None,\r\n ct=count,\r\n page=page,\r\n )\r\n\r\n # if the route name is search_ajax we want a json response\r\n # else we just want to return the payload data to the mako template\r\n if 'ajax' in route_name or 'api' in route_name:\r\n return {\r\n 'success': True,\r\n 'message': \"\",\r\n 'payload': {\r\n 'search_results': [dict(res) for res in res_list],\r\n 'result_count': len(res_list),\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'username': username,\r\n }\r\n }\r\n else:\r\n return {\r\n 'search_results': res_list,\r\n 'count': len(res_list),\r\n 'max_count': 50,\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'username': username,\r\n }", "def search(request):\n if 'q' in request.GET:\n term = request.GET['q']\n story_list = Story.objects.filter(Q(title__contains=term)|Q(markdown_content__contains=term))\n heading = \"Search results\"\n return render_to_response(\"cms/story_list.html\",locals())", "def search(self, q, start = 0, num = 10):\n\n\t\turl = 'http://www.google.com/xhtml?'\n\t\tquery = urllib.urlencode({'q':q, 'start':start, 'num':num})\n\n\t\tresult = self.get(url + query)\n\t\tcontent = result.read()\n\n\t\ttokens = re.findall(\n\t\t\t'<a\\s+accesskey=\"\\d+\"\\s+href=\"(.*?)\"\\s*>(.*?)</a>(.*?)<span class=\"c2\">',\n\t\t\tcontent)\n\n\t\tresults = []\n\n\t\tfor token in tokens:\n\t\t\turl = token[0][token[0].index(';u=') + 3:]\n\t\t\ttitle = token[1]\n\t\t\texcerpt = token[2]\n\n\t\t\tlogging.debug('Search.search - found url ' + url)\n\n\t\t\tresults.append((urllib.unquote(url), title, excerpt))\n\n\t\treturn results", "def search():\n search = request.form.get(\"search\")\n results = mongo.db.recipes.find({\"$text\": {\"$search\": search}}).limit(2)\n result_count = mongo.db.recipes.find(\n {\"$text\": {\"$search\": search}}).count()\n if result_count > 0:\n return render_template(\"pages/search.html\", results=results, search=search, isFooter=True)\n else:\n flash(\"No results found.\")\n return render_template(\"pages/search.html\", results=results, search=search, isFooter=True)", "def search(query):\n\tprint('-> \tSeraching -> {}'.format(query))\n\told_html = driver.find_element_by_tag_name('html').text\n\ts = 
driver.find_element_by_name('q')\n\ts.send_keys(query)\n\ts.send_keys(Keys.ENTER) \n\treturn wait_for(old_html)", "def search(self):\n premium = self.config.get('premium', False)\n\n self.params[self.opts['keyword']['query_key']] = self.config[self.opts['keyword']['config_key']] # keyword\n # Selection params\n self.append_param('tag_mode', 'selection')\n if premium:\n self.append_param('order_premium', 'selection')\n else:\n self.append_param('order_not_premium', 'selection')\n\n self.append_param('type', 'selection')\n self.append_param('tool', 'selection')\n self.append_param('ratio', 'selection')\n self.append_param('mode', 'selection')\n\n # Number params\n self.append_param('min_width', 'number')\n self.append_param('max_width', 'number')\n self.append_param('min_height', 'number')\n self.append_param('max_height', 'number')\n if premium:\n self.append_param('min_bookmark', 'number')\n self.append_param('max_bookmark', 'number')\n else:\n self.set_bookmark_filter()\n\n # Date params\n self.append_param('start_time', 'date')\n self.append_param('end_time', 'date')\n\n # multi work filter\n self.filters['multi'] = self.config.get('download_multi', False)\n\n for i in range(self.config['start_page'], self.config['end_page'] + 1):\n self.params['p'] = i\n self.headers['Referer'] = 'https://www.pixiv.net/'\n url ='https://www.pixiv.net/search.php'\n html = self.session.get(url, headers = self.headers, params = self.params, timeout = 10, proxies = self.proxies)\n\n soup = BeautifulSoup(html.text, 'lxml')\n data_items = json.loads(soup.find('input', id = 'js-mount-point-search-result-list')['data-items'])\n\n return self.extract_work_info(data_items)", "def search():\n\n # Make sure user is logged in\n if \"username\" not in session:\n return render_template(\"index.html\", message=\"Please login to view that page!\")\n\n if request.method == \"POST\":\n # Get search form\n search = request.form.get(\"search\").strip()\n search_partial = search + '%'\n\n books = db.execute(\"SELECT * FROM books WHERE isbn LIKE :search OR author LIKE :search OR title LIKE :search\", {\"search\": search_partial}).fetchall()\n\n return render_template(\"search.html\", username=session[\"username\"], search=search, books=books)\n # Actually you cant get this without being logged in\n return render_template(\"search.html\")", "def profiles_search(pages: list, session: requests.Session):\n result = []\n for page in pages:\n sleep(1)\n soup = BeautifulSoup(session.get(page).text, \"lxml\")\n table = soup.find(\"table\", class_=\"seaman-list-table va-top seaman-list-table-2\")\n for rows in table.find_all(\"tr\")[1:]:\n for column in rows.find(\"td\", 'seaman-name-td'):\n try:\n result.append(\"https://ukrcrewing.com.ua\" + column[\"href\"])\n except TypeError:\n continue\n logger.info(f'{len(result)} profiles detected')\n return result", "def search_antiques(request):\n query = request.GET.get('q')\n\n if query:\n results = Antiques.objects.filter(Q(name__icontains=query) | Q(description__icontains=query))\n else:\n results = Antiques.objects.all()\n pages = pagination(request, results, num=4)\n\n context = {\n 'items': pages[0],\n 'page_range': pages[1],\n 'query': query,\n }\n\n return render(request, \"antiques.html\", context)", "def page_body():\r\n st.header(\"Search\")\r\n st.subheader(\"Search For SMEs With A Few Different Options\")\r\n\r\n search_mode_selection = st.radio(\r\n help=\"Search For SMEs That Have Particular Connections, Titles, Or Names...\",\r\n label=\"Search By\",\r\n 
options=(SearchMode.Connection.value, SearchMode.JobTitle.value, SearchMode.Name.value),\r\n )\r\n\r\n search_form = st.form(key=\"search_form\", clear_on_submit=False)\r\n search_query = search_form.text_input(label=\"\", value=\"Search...\", max_chars=50)\r\n search_button = search_form.form_submit_button(label=\"Search\")\r\n\r\n if search_button:\r\n results = get_search_results(search_query, SearchMode[str(search_mode_selection).replace(\" \", \"\")])\r\n\r\n # Loop through the results returned from the database query\r\n for result in results:\r\n result_dict = result.to_dict() # Convert internally to a Python dict\r\n\r\n # dict keys here are actually database keys in Firestore. You would need to be signed in to see the proper values\r\n with st.expander(result_dict[\"name\"] + \" - \" + str(result_dict[\"age\"]) + \" years old\"):\r\n st.header(result_dict[\"name\"])\r\n st.write(result_dict[\"jobTitle\"])\r\n\r\n st.subheader(\"Personal Summary\")\r\n st.write(result_dict[\"personalSummary\"])\r\n\r\n if result_dict[\"companyName\"]:\r\n st.subheader(\"Works At\")\r\n st.write(result_dict[\"companyName\"])\r\n\r\n if result_dict[\"connections\"]:\r\n st.subheader(result_dict[\"name\"] + \"'s Connections\")\r\n st.write(\", \".join(result_dict[\"connections\"]))", "def search(request):\n if 'q' in request.GET:\n term = request.GET['q'].lower()\n thispushqueryset = pushitem.objects.filter(Q(searchfield__contains= term) )\n message = _('Searching for %s')%str(term)\n else:\n thispushqueryset = pushitem.objects.none()\n message = _('No search query specified')\n r = makepage(request,thispushqueryset,{'search_query':request.GET['q'].lower(), 'showall': 1,'message':message,}, template='search.html')\n return r", "def query(url):", "def search(self, search_params):\n if self.db.is_data_set():\n return self.db.search(search_params)\n else:\n self.crawler.initialize()\n # return self.db.search(search_params)", "def test_paged_search(self):\n search_dn = \"ou=nerdherd,%s\" % self.basedn\n res = self.conn.search(search_dn, 1, page_size=2)\n for ent in res:\n self.assertIsInstance(ent, bonsai.LDAPEntry)\n page = 1 # First page already is acquired.\n while True:\n if len(res) > 2:\n self.fail(\"The size of the page is greater than expected.\")\n msgid = res.acquire_next_page()\n if msgid is None:\n break\n res = self.conn.get_result(msgid)\n page += 1\n self.assertEqual(page, 3)", "def search(self, what, cat='all'):\n # Sign in:\n if self.search_auth:\n self._sign_in()\n opener = self.opener\n else:\n opener = urllib2.build_opener(urllib2.BaseHandler())\n ret = []\n page = 0\n while page < self.PAGE_NUMBER:\n results = []\n parser = self.FilelistParser(results, self.url)\n url = self.url+'/browse.php?search=%s&cat=%s&searchin=0&sort=0&page=%d'%(what, self.supported_categories[cat], page)\n f = opener.open(url)\n dat = f.read().decode('iso-8859-1', 'replace')\n results_re = re.compile(\"(?s)<div class='cblock-innercontent'>.*\")\n for match in results_re.finditer(dat):\n res_tab = match.group(0)\n parser.feed(res_tab)\n parser.close()\n break\n if len(results) <= 0:\n break\n page += 1", "def search():\n # Check for database tables\n check_db()\n # Check for GET data\n search_query = request.args.get(\"q\", None)\n # Format search results as HTML\n search_results = get_search_results_html(search_query)\n # Format recent searches as HTML\n recent_searches = get_recent_searches_html()\n\n return html_wrapper('<h1>' + SITE_NAME + '''</h1>\n <form action=\"/\" method=\"GET\">\n <input type=\"text\" 
name=\"q\">\n <input type=\"submit\" value=\"search\">\n </form>''' + search_results + recent_searches)", "def hyperlink_search(request):\n\tip = get_ip(request, right_most_proxy=True)\n\tIpAddressInformation.objects.create(ip_address=ip)\n\tif 'UniProtKB Accession' in request.GET and request.GET['UniProtKB Accession'] or \\\n\t'Protein' in request.GET and request.GET['Protein'] or \\\n\t'Gene' in request.GET and request.GET['Gene'] or \\\n\t'Organism' in request.GET and request.GET['Organism'] or \\\n\t'Organismid' in request.GET and request.GET['Organismid'] or \\\n\t'SubCellular' in request.GET and request.GET['SubCellular'] or \\\n\t'Peptide Sequence' in request.GET and request.GET['Peptide Sequence'] or \\\n\t'Pathway Name' in request.GET and request.GET['Pathway Name'] or \\\n\t'Disease Name' in request.GET and request.GET['Disease Name'] or \\\n\t'Go ID' in request.GET and request.GET['Go ID'] or \\\n\t'Go Name' in request.GET and request.GET['Go Name'] or \\\n\t'Go Term' in request.GET and request.GET['Go Term'] or \\\n\t'AssayFdaApproveMark' in request.GET and request.GET['AssayFdaApproveMark']:\n\t\tuseruniprotkb =\"\"\n\t\tuserprotein =\"\"\n\t\tusergeneid =\"\"\n\t\tuserorg=\"\"\n\t\tuserorgid=\"\"\n\t\tusersubcell =\"\"\n\t\tuserpepseq =\"\"\n\t\tuserpathway =\"\"\n\t\tuserdis =\"\"\n\t\tusergoid =\"\"\n\t\tusergotn =\"\"\n\t\tusergot=\"\"\n\t\tuserassayfdaapprovemark=\"\"\n\t\tfinalsearhdata=''\n\t\ttry:\n\t\t\tuseruniprotkb = request.GET[\"UniProtKB Accession\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in useruniprotkb:\n\t\t\tuseruniprotkb=(useruniprotkb.strip()).split('|')\n\t\telse:\n\t\t\tuseruniprotkb=(useruniprotkb.strip()).split('\\\\n')\n\t\tuseruniprotkb=[(item.strip()).lower() for item in useruniprotkb]\n\t\tuseruniprotkb=map(str, useruniprotkb)\n\t\tuseruniprotkb=filter(None, useruniprotkb)\n\n\t\ttry:\n\t\t\tuserprotein = request.GET[\"Protein\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userprotein:\n\t\t\tuserprotein=(userprotein.strip()).split('|')\n\t\telse:\n\t\t\tuserprotein=(userprotein.strip()).split('\\\\n')\n\t\tuserprotein=[(item.strip()).lower() for item in userprotein]\n\t\tuserprotein=map(str, userprotein)\n\t\tuserprotein=filter(None, userprotein)\n\n\t\ttry:\n\t\t\tusergeneid = request.GET[\"Gene\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usergeneid:\n\t\t\tusergeneid=(usergeneid.strip()).split('|')\n\t\telse:\n\t\t\tusergeneid=(usergeneid.strip()).split('\\\\n')\n\t\tusergeneid=[(item.strip()).lower() for item in usergeneid]\n\t\tusergeneid=map(str, usergeneid)\n\t\tusergeneid=filter(None, usergeneid)\n\n\t\ttry:\n\t\t\tuserorg = request.GET[\"Organism\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userorg:\n\t\t\tuserorg=(userorg.strip()).split('|')\n\t\telse:\n\t\t\tuserorg=(userorg.strip()).split('\\\\n')\n\t\tuserorg=[(item.strip()).lower() for item in userorg]\n\t\tuserorg=map(str, userorg)\n\t\tuserorg=filter(None, userorg)\n\n\t\ttry:\n\t\t\tuserorgid = request.GET[\"Organismid\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userorgid:\n\t\t\tuserorgid=(userorgid.strip()).split('|')\n\t\telse:\n\t\t\tuserorgid=(userorgid.strip()).split('\\\\n')\n\t\tuserorgid=[(item.strip()).lower() for item in userorgid]\n\t\tuserorgid=map(str, userorgid)\n\t\tuserorgid=filter(None, userorgid)\n\n\t\ttry:\n\t\t\tusersubcell = request.GET[\"SubCellular\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in 
usersubcell:\n\t\t\tusersubcell=(usersubcell.strip()).split('|')\n\t\telse:\n\t\t\tusersubcell=(usersubcell.strip()).split('\\\\n')\n\t\tusersubcell=[(item.strip()).lower() for item in usersubcell]\n\t\tusersubcell=map(str, usersubcell)\n\t\tusersubcell=filter(None, usersubcell)\n\n\t\ttry:\n\t\t\tuserpepseq = request.GET[\"Peptide Sequence\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userpepseq:\n\t\t\tuserpepseq=(userpepseq.strip()).split('|')\n\t\telse:\n\t\t\tuserpepseq=(userpepseq.strip()).split('\\\\n')\n\t\tuserpepseq=[(item.strip()).lower() for item in userpepseq]\n\t\tuserpepseq=map(str, userpepseq)\n\t\tuserpepseq=filter(None, userpepseq)\n\n\t\ttry:\n\t\t\tuserpathway = request.GET[\"Pathway Name\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userpathway:\n\t\t\tuserpathway=(userpathway.strip()).split('|')\n\t\telse:\n\t\t\tuserpathway=(userpathway.strip()).split('\\\\n')\n\t\tuserpathway=[(item.strip()).lower() for item in userpathway]\n\t\tuserpathway=map(str, userpathway)\n\t\tuserpathway=filter(None, userpathway)\n\n\t\ttry:\n\t\t\tuserdis = request.GET[\"Disease Name\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userdis:\n\t\t\tuserdis=(userdis.strip()).split('|')\n\t\telse:\n\t\t\tuserdis=(userdis.strip()).split('\\\\n')\n\t\tuserdis=[(item.strip()).lower() for item in userdis]\n\t\tuserdis=map(str, userdis)\n\t\tuserdis=filter(None, userdis)\n\n\t\ttry:\n\t\t\tusergoid = request.GET[\"Go ID\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usergoid:\n\t\t\tusergoid=(usergoid.strip()).split('|')\n\t\telse:\n\t\t\tusergoid=(usergoid.strip()).split('\\\\n')\n\t\tusergoid=[(item.strip()).lower() for item in usergoid]\n\t\tusergoid=map(str, usergoid)\n\t\tusergoid=filter(None, usergoid)\n\n\t\ttry:\n\t\t\tusergotn = request.GET[\"Go Name\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usergotn:\n\t\t\tusergotn=(usergotn.strip()).split('|')\n\t\telse:\n\t\t\tusergotn=(usergotn.strip()).split('\\\\n')\n\t\tusergotn=[(item.strip()).lower() for item in usergotn]\n\t\tusergotn=map(str, usergotn)\n\t\tusergotn=filter(None, usergotn)\n\n\t\ttry:\n\t\t\tusergot = request.GET[\"Go Term\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usergot:\n\t\t\tusergot=(usergot.strip()).split('|')\n\t\telse:\n\t\t\tusergot=(usergot.strip()).split('\\\\n')\n\t\tusergot=[(item.strip()).lower() for item in usergot]\n\t\tusergot=map(str, usergot)\n\t\tusergot=filter(None, usergot)\n\n\t\ttry:\n\t\t\tuserassayfdaapprovemark = request.GET[\"AssayFdaApproveMark\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userassayfdaapprovemark:\n\t\t\tuserassayfdaapprovemark=(userassayfdaapprovemark.strip()).split('|')\n\t\t\tuserassayfdaapprovemark=list(set(userassayfdaapprovemark))\n\t\telse:\n\t\t\tuserassayfdaapprovemark=(userassayfdaapprovemark.strip()).split('\\\\n')\n\t\t\tuserassayfdaapprovemark=list(set(userassayfdaapprovemark))\n\t\tuserassayfdaapprovemark=[(item.strip()).lower() for item in userassayfdaapprovemark]\n\t\tuserassayfdaapprovemark=map(str, userassayfdaapprovemark)\n\t\tuserassayfdaapprovemark=filter(None, userassayfdaapprovemark)\n\n\t\tspquerylist =[]\n\t\tsearchtermlist=[]\n\n\t\tif len(useruniprotkb) >0:\n\t\t\tfinalsearhdata+='UniProtKB Accession:'+';'.join(useruniprotkb)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in 
useruniprotkb:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"UniProtKB Accession.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userprotein)> 0:\n\t\t\tfinalsearhdata+='Protein:'+';'.join(userprotein)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userprotein:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Protein.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usergeneid) >0:\n\t\t\tfinalsearhdata+='Gene:'+';'.join(usergeneid)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in usergeneid:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Gene.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userorg) > 0:\n\t\t\tfinalsearhdata+='Organism:'+';'.join(userorg)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userorg:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Organism.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userorgid) > 0:\n\t\t\tfinalsearhdata+='Organism ID:'+';'.join(userorgid)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userorgid:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Organism ID.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usersubcell) >0:\n\t\t\tfinalsearhdata+='SubCellular:'+';'.join(usersubcell)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in usersubcell:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"SubCellular.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userpepseq) >0:\n\t\t\tfinalsearhdata+='Peptide Sequence:'+';'.join(userpepseq)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in 
userpepseq:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Peptide Sequence.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userpathway) >0:\n\t\t\tfinalsearhdata+='Pathway Name:'+';'.join(userpathway)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userpathway:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Pathway Name.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userdis) >0:\n\t\t\tfinalsearhdata+='Disease Name:'+';'.join(userdis)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userdis:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Disease Name.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usergoid) >0:\n\t\t\tfinalsearhdata+='Go ID:'+';'.join(usergoid)+' '\n\t\t\tsdict={}\n\t\t\tsdict[\"Go ID.ngram\"]=[i.split(' ')[0] for i in usergoid]\n\t\t\ttdict={}\n\t\t\ttdict[\"terms\"]=sdict\n\t\t\tsearchtermlist.append(tdict)\n\t\t\tshouldlist=[]\n\t\t\tfor x in usergoid:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Go ID.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usergotn) >0:\n\t\t\tfinalsearhdata+='Go Name:'+';'.join(usergotn)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in usergotn:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Go Name.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usergot) > 0:\n\t\t\tfinalsearhdata+='Go Term:'+';'.join(usergot)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in usergot:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Go Term.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\n\t\tif len(userassayfdaapprovemark) > 
0:\n\t\t\tfinalsearhdata+='Assays for FDA approved Marker::'+';'.join(userassayfdaapprovemark)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userassayfdaapprovemark:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Assays for FDA approved Marker.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\n\t\tif len(searchtermlist)>0:\n\t\t\tes.indices.refresh(index=\"mrmassaydb-index\")\n\n\t\t\tquery={\n\t\t\t\t\"query\": {\n\t\t\t\t\t\"bool\": {\n\t\t\t\t\t\t\"must\":searchtermlist\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tnameFIle=names.get_first_name()\n\t\t\tjsonfilename=nameFIle+'_advance_search.json'\n\t\t\tjsonfilepath=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'adavancesearch', 'results', jsonfilename)\n\t\t\tjsonfileoutput= open(jsonfilepath,'w')\n\t\t\tjfinaldata=[]\n\t\t\tres=helpers.scan(client=es,scroll='2m',index=\"mrmassaydb-index\", doc_type=\"mrmassaydb-type\",query=query,request_timeout=30)\n\t\t\tjfinaldata=[]\n\t\t\tfor i in res:\n\t\t\t\tjdic=i['_source']\n\t\t\t\tjdic={str(tkey):force_text(tvalue) for tkey,tvalue in jdic.items()}\n\t\t\t\tif jdic[\"UniprotKb entry status\"] ==\"Yes\" and jdic['UniProtKB Accession'] !='502':\n\t\t\t\t\tjdic[\"sel\"] =\"\"\n\t\t\t\t\tjdic[\"PPI\"] =\"View\"\n\t\t\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('<br>','|')\n\t\t\t\t\tjdic[\"SRMAtlas URL\"]=jdic[\"SRMAtlas URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Passel URL\"]=jdic[\"Passel URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"CPTAC URL\"]=jdic[\"CPTAC URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Panoramaweb URL\"]=jdic[\"Panoramaweb URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"PeptideTracker URL\"]=jdic[\"PeptideTracker URL\"].replace('\\\\','')\n\t\t\t\t\t#if jdic[\"Pathway Name\"].lower() !='na':\n\t\t\t\t\t#\tjdic[\"Pathway Name\"]=re.sub(r\"(\\w)([A-Z])\",r\"\\1|\\2\",jdic[\"Pathway Name\"])\n\t\t\t\t\tjdic[\"Mean Concentration\"] =jdic[\"Mean Concentration\"].replace('fmol/','fmol/µ')\n\t\t\t\t\tjdic[\"Concentration\"] =jdic[\"Concentration\"].replace('fmol/','fmol/µ')\t\t\t\t\t\n\t\t\t\t\tjfinaldata.append(jdic)\n\n\t\t\tfoundHits=len(jfinaldata)\n\t\t\tjson.dump(jfinaldata[:10000],jsonfileoutput)\n\t\t\tjsonfileoutput.close()\n\n\t\t\tif foundHits >0:\n\t\t\t\tstatsummary=summaryStatcal(jfinaldata)\n\t\t\t\tpathwaychart=statsummary['pathwaychart']\n\t\t\t\tpathwaychart=[i[:2] for i in pathwaychart]\n\t\t\t\tspecieslist=statsummary['specieslist']\n\t\t\t\ttotallist=statsummary['total']\n\t\t\t\tsubcell=statsummary['subcell']\n\t\t\t\tgodic=statsummary['godic']\n\t\t\t\tjvennprot=statsummary['jevennstat'][0]\n\t\t\t\tjvennpep=statsummary['jevennstat'][1]\n\t\t\t\tmrmdatabase=statsummary['jevennstat'][2]\n\t\t\t\tsortedgodic=OrderedDict(sorted(godic.items(), key=lambda t: t[1]))\n\t\t\t\tupdatedgodic=dict(list(sortedgodic.items())[:10])\n\t\t\t\tpepseqdataseries=ast.literal_eval(json.dumps(statsummary['pepseqdataseries']))\n\t\t\t\tprodataseries=statsummary['prodataseries']\n\t\t\t\tunqisostat=statsummary['unqisostat']\n\t\t\t\tjsonfilepathStat=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'adavancesearch', 'statsummary', 
jsonfilename)\n\t\t\t\tjsonfileoutputStat= open(jsonfilepathStat,'w')\n\t\t\t\tjson.dump(statsummary,jsonfileoutputStat)\n\t\t\t\tjsonfileoutputStat.close()\n\t\t\t\turlname=\"'/resultFile/jsonData/resultJson/adavancesearch/results/\"+jsonfilename+\"'\"\n\t\t\t\tcontextindex={\n\t\t\t\t\t\"filename\":urlname,\"colname\":json.dumps(colname),\n\t\t\t\t\t'query': finalsearhdata,'foundHits':foundHits,\n\t\t\t\t\t'pathwaychart':pathwaychart[:11],'specieslist':specieslist,\n\t\t\t\t\t'totallist':totallist,'subcell':subcell,\n\t\t\t\t\t'updatedgodic':updatedgodic,'pepseqdataseries':pepseqdataseries,\n\t\t\t\t\t'prodataseries':prodataseries,'unqisostat':unqisostat,\n\t\t\t\t\t'jvennprot':json.dumps(jvennprot),'jvennpep':json.dumps(jvennpep),'jvennmrmdb':json.dumps(mrmdatabase)\n\t\t\t\t\t}\n\t\t\t\treturn render(request,'resultform.html',contextindex)\n\t\t\telse:\n\t\t\t\treturn render(request,'resultform.html',{'foundHits':foundHits})", "def search_results():\n    skip = int(flask.request.args.get(\"skip\", \"0\"))\n    limit = int(flask.request.args.get(\"limit\", \"20\"))\n\n    obj = {}\n\n    # query : will be event kit in case of triage information\n    uidstr = flask.request.args.get(\"query\", None)\n\n    if uidstr == None:\n        obj[\"error\"] = \"Missing search ID\"\n\n    uidstr = json.loads(uidstr)\n\n    obj[\"query\"] = {}\n    obj[\"query\"][\"uid\"] = uidstr\n    obj[\"clips\"] = []\n    states = backend.get_search_sessions()\n    obj[\"sessions\"] = []\n    for astate in states:\n        obj[\"sessions\"].append(str(astate))\n    try:\n        uid = uuid.UUID(uidstr)\n        state = backend.get_iqr_search_state(uid)\n        # use the uid of the state and get the information from the database\n        col = str(state.uuid)\n        obj[\"collection\"] = col\n        searchdb[col].ensure_index([(\"model_id\", pymongo.ASCENDING),(\"probability\", pymongo.DESCENDING) ])\n        # Force probabilities\n        obj[\"positives\"] = list(state.positives)\n        obj[\"negatives\"] = list(state.negatives)\n        log = \"\"\n        for id in state.positives:\n            # log = log + \"Found %d\"%(searchdb[col].find({\"model_id\" : \"FUSION\", \"clip_id\" : id}).count()) + \", \"\n            # res = searchdb[col].update({\"model_id\" : \"FUSION\", \"clip_id\" : id}, {\"$set\" : { \"probability\" : 1.0}})\n            # log = log + \"Done %d\"%id + \", \"\n            news = searchdb[col].find_one({\"model_id\" : \"FUSION\", \"clip_id\" : id})\n            news[\"probability\"] = 1.0001\n            searchdb[col].save(news)\n            log = log + \"Now : \" + str(news)\n\n\n        for id in state.negatives:\n            # log = log + \"Found %d\"%(searchdb[col].find({\"model_id\" : \"FUSION\", \"clip_id\" : id}).count()) + \", \"\n            # res = searchdb[col].update({\"model_id\" : \"FUSION\", \"clip_id\" : id}, {\"$set\" : { \"probability\" : 0.0}})\n            # log = log + \"Done %d\"%id + \", \"\n            news = searchdb[col].find_one({\"model_id\" : \"FUSION\", \"clip_id\" : id})\n            news[\"probability\"] = 0.0\n            searchdb[col].save(news)\n            log = log + \"Now : \" + str(news)\n\n        obj[\"log\"] = log\n\n        allres = searchdb[col].find({\"model_id\" : \"FUSION\"}).sort([(\"probability\", pymongo.DESCENDING)]).skip(skip).limit(limit)\n        rank = skip + 1\n        for one in allres:\n            aclip = {}\n            aclip[\"score\"] = one[\"probability\"]\n            aclip[\"id\"] = \"HVC\" + str(one[\"clip_id\"]).zfill(6)\n            clipobj = db[\"clips\"].find_one({\"id\" : \"HVC\" + str(one[\"clip_id\"]).zfill(6)},{\"duration\" : 1})\n            aclip[\"duration\"] = clipobj[\"duration\"]\n            aclip[\"rank\"] = rank\n            rank = rank + 1\n            obj[\"clips\"].append(aclip)\n        obj[\"count\"] = len(obj[\"clips\"])\n\n    except Exception as e:\n        obj[\"error\"] = str(type(e)) + \": \" + str(e)\n 
return jsonify(obj)\n\n obj[\"next\"] = \"http://localhost:5003/iqr/search_results?\" + urllib.urlencode({\"uid\" : uid, \"skip\" : skip+limit } )\n return jsonify(obj)", "def _search(self, query_obj, num, after, reverse, count=0):\r\n builder = SearchBuilder(query_obj,\r\n after = after, num = num, reverse = reverse,\r\n count = count,\r\n wrap = ListingController.builder_wrapper)\r\n\r\n listing = LinkListing(builder, show_nums=True)\r\n\r\n # have to do it in two steps since total_num and timing are only\r\n # computed after fetch_more\r\n res = listing.listing()\r\n timing = time_module.time() - builder.start_time\r\n\r\n return builder.total_num, timing, res", "def search(self, word):", "def search_for_books(main_page): # Add information to the printout if the book is rented\n\n type_of_search = 0\n\n header = \"\"\"\n Do you want to search for books by the first letter of the title\n or by the type?\n \"\"\"\n search_choices= (\n (\"To search by letter\", search_by_letter),\n (\"To search by type\", search_by_type),\n (\"To exit\",exit.exit_to_main)\n )\n\n book_search = Screen(header,search_choices,\n main_page.login, main_page.password)\n book_search.activate()", "def get_search_results(text, out_file=None, num_res=3):\n # specify the source website\n text += ' site:tableau.com'\n text = urllib.parse.quote_plus(text)\n\n url = 'https://google.com/search?q=' + text\n USER_AGENT = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}\n \n # TODO: add delay here?\n response = requests.get(url,headers=USER_AGENT)\n\n soup = BeautifulSoup(response.text, 'html.parser')\n result_block = soup.find_all('div', attrs={'class': 'g'})\n\n final_result = []\n for rb_ind in range(len(result_block)):\n if len(final_result)==num_res:\n # done sraping\n break\n \n rb = result_block[rb_ind]\n # print(rb_ind)\n if rb.find('h3'):\n title = rb.find('h3').text\n link = rb.find('a', href=True)['href']\n\n desc = rb.find(class_='IsZvec').text\n \n if not desc:\n # print(rb_ind)\n # print(\"got here\")\n desc = rb.find(class_='ILfuVd')\n if desc:\n desc = desc.text\n else:\n desc = ''\n final_result.append([title,link,desc])\n print('\\n'.join([title,link,desc]))\n\n if out_file is not None:\n with open(out_file,\"a+\",encoding='utf8') as f:\n f.writelines([r + '\\n' for r in final_result])\n \n return final_result", "def page21(self):\n result = request2101.GET('/Cars_Sample_App/supercars.do', None,\n ( NVPair('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),\n NVPair('Referer', 'http://supercars-tomcat:8080/Cars_Sample_App/car.do?query=carEnquiries&cid=26'), ))\n self.token_query = \\\n httpUtilities.valueFromBodyURI('query') # 'manu'\n # 14 different values for token_mid found in response; the first matched\n # the last known value of token_mid - don't update the variable.\n\n return result", "def _search(dork):\n\n if not dork:\n return None\n\n data = None\n headers = {}\n\n headers[HTTP_HEADER.USER_AGENT] = dict(conf.httpHeaders).get(HTTP_HEADER.USER_AGENT, DUMMY_SEARCH_USER_AGENT)\n headers[HTTP_HEADER.ACCEPT_ENCODING] = HTTP_ACCEPT_ENCODING_HEADER_VALUE\n\n try:\n req = urllib2.Request(\"https://www.google.com/ncr\", headers=headers)\n conn = urllib2.urlopen(req)\n except Exception as ex:\n errMsg = \"unable to connect to Google ('%s')\" % getSafeExString(ex)\n raise SqlmapConnectionException(errMsg)\n\n gpage = conf.googlePage if conf.googlePage > 1 else 1\n logger.info(\"using search 
result page #%d\" % gpage)\n\n url = \"https://www.google.com/search?\"\n url += \"q=%s&\" % urlencode(dork, convall=True)\n url += \"num=100&hl=en&complete=0&safe=off&filter=0&btnG=Search\"\n url += \"&start=%d\" % ((gpage - 1) * 100)\n\n try:\n req = urllib2.Request(url, headers=headers)\n conn = urllib2.urlopen(req)\n\n requestMsg = \"HTTP request:\\nGET %s\" % url\n requestMsg += \" %s\" % httplib.HTTPConnection._http_vsn_str\n logger.log(CUSTOM_LOGGING.TRAFFIC_OUT, requestMsg)\n\n page = conn.read()\n code = conn.code\n status = conn.msg\n responseHeaders = conn.info()\n page = decodePage(page, responseHeaders.get(\"Content-Encoding\"), responseHeaders.get(\"Content-Type\"))\n\n responseMsg = \"HTTP response (%s - %d):\\n\" % (status, code)\n\n if conf.verbose <= 4:\n responseMsg += getUnicode(responseHeaders, UNICODE_ENCODING)\n elif conf.verbose > 4:\n responseMsg += \"%s\\n%s\\n\" % (responseHeaders, page)\n\n logger.log(CUSTOM_LOGGING.TRAFFIC_IN, responseMsg)\n except urllib2.HTTPError as ex:\n try:\n page = ex.read()\n except Exception as _:\n warnMsg = \"problem occurred while trying to get \"\n warnMsg += \"an error page information (%s)\" % getSafeExString(_)\n logger.critical(warnMsg)\n return None\n except (urllib2.URLError, httplib.error, socket.error, socket.timeout, socks.ProxyError):\n errMsg = \"unable to connect to Google\"\n raise SqlmapConnectionException(errMsg)\n\n retVal = [urllib.unquote(match.group(1) or match.group(2)) for match in re.finditer(GOOGLE_REGEX, page, re.I)]\n\n if not retVal and \"detected unusual traffic\" in page:\n warnMsg = \"Google has detected 'unusual' traffic from \"\n warnMsg += \"used IP address disabling further searches\"\n\n if conf.proxyList:\n raise SqlmapBaseException(warnMsg)\n else:\n logger.critical(warnMsg)\n\n if not retVal:\n message = \"no usable links found. 
What do you want to do?\"\n message += \"\\n[1] (re)try with DuckDuckGo (default)\"\n message += \"\\n[2] (re)try with Bing\"\n message += \"\\n[3] quit\"\n choice = readInput(message, default='1')\n\n if choice == '3':\n raise SqlmapUserQuitException\n elif choice == '2':\n url = \"https://www.bing.com/search?q=%s&first=%d\" % (urlencode(dork, convall=True), (gpage - 1) * 10 + 1)\n regex = BING_REGEX\n else:\n url = \"https://duckduckgo.com/html/\"\n data = \"q=%s&s=%d\" % (urlencode(dork, convall=True), (gpage - 1) * 30)\n regex = DUCKDUCKGO_REGEX\n\n try:\n req = urllib2.Request(url, data=data, headers=headers)\n conn = urllib2.urlopen(req)\n\n requestMsg = \"HTTP request:\\nGET %s\" % url\n requestMsg += \" %s\" % httplib.HTTPConnection._http_vsn_str\n logger.log(CUSTOM_LOGGING.TRAFFIC_OUT, requestMsg)\n\n page = conn.read()\n code = conn.code\n status = conn.msg\n responseHeaders = conn.info()\n page = decodePage(page, responseHeaders.get(\"Content-Encoding\"), responseHeaders.get(\"Content-Type\"))\n\n responseMsg = \"HTTP response (%s - %d):\\n\" % (status, code)\n\n if conf.verbose <= 4:\n responseMsg += getUnicode(responseHeaders, UNICODE_ENCODING)\n elif conf.verbose > 4:\n responseMsg += \"%s\\n%s\\n\" % (responseHeaders, page)\n\n logger.log(CUSTOM_LOGGING.TRAFFIC_IN, responseMsg)\n except urllib2.HTTPError as ex:\n try:\n page = ex.read()\n page = decodePage(page, ex.headers.get(\"Content-Encoding\"), ex.headers.get(\"Content-Type\"))\n except socket.timeout:\n warnMsg = \"connection timed out while trying \"\n warnMsg += \"to get error page information (%d)\" % ex.code\n logger.critical(warnMsg)\n return None\n except:\n errMsg = \"unable to connect\"\n raise SqlmapConnectionException(errMsg)\n\n retVal = [urllib.unquote(match.group(1).replace(\"&amp;\", \"&\")) for match in re.finditer(regex, page, re.I | re.S)]\n\n if not retVal and \"issue with the Tor Exit Node you are currently using\" in page:\n warnMsg = \"DuckDuckGo has detected 'unusual' traffic from \"\n warnMsg += \"used (Tor) IP address\"\n\n if conf.proxyList:\n raise SqlmapBaseException(warnMsg)\n else:\n logger.critical(warnMsg)\n\n return retVal", "def search():\n\n # TO DO: refine with wildcard to curb superfluous results\n \n # logged in users can search for books\n # via 'isbn', 'author', or 'title'\n query = request.form.get(\"search\")\n if not query:\n return render_template(\"home.html\", result=0, name=session[\"name\"],result_head=\"Results\")\n \n # query 'isbn'\n if query.isdigit():\n res = db.execute(\"SELECT * FROM books WHERE isbn LIKE :query\",\n {\"query\": f\"{query}%\"}).fetchall()\n else:\n # query 'author'\n res = db.execute(\"SELECT * FROM books WHERE author LIKE :query\",\n {\"query\": f\"{query}%\"}).fetchall()\n # If no result from author, query 'title'\n if len(res) == 0:\n res = db.execute(\"SELECT * FROM books WHERE title LIKE :query\",\n {\"query\": f\"{query}%\"}).fetchall()\n if len(res) == 0:\n res = 0\n return render_template(\"home.html\", result=res, name=session[\"name\"], result_head=\"Results\")", "def search(self, query: str, page: int = 1) -> list:\r\n\r\n raw = self.session.get(f\"{self.host}/page/{page}\", params={\r\n \"s\": query})\r\n soup = self.soup(raw)\r\n\r\n result = []\r\n for artikel in soup.findAll(\"article\"):\r\n a = artikel.find(\"a\")\r\n if not a.img:\r\n continue\r\n result.append({\r\n \"title\": a[\"title\"],\r\n \"id\": self.getPath(a[\"href\"])\r\n })\r\n return result", "def search(user, param):\r\n if len(param) <= 2:\r\n return 
bad_request(error_messages['too_short'])\r\n return search_user(param.lower(), user)", "def test_paged_search_with_auto_acq(self):\n client = LDAPClient(self.url)\n conn = client.connect()\n search_dn = \"ou=nerdherd,%s\" % self.basedn\n res = conn.search(search_dn, 1, page_size=3)\n if len(res) != 3:\n self.fail(\"The size of the page is not what is expected.\")\n entry = 0\n for ent in res:\n self.assertIsInstance(ent, bonsai.LDAPEntry)\n entry += 1\n self.assertEqual(entry, 6)\n self.assertIsNone(res.acquire_next_page())", "def searchpageparsing(page): # Note for initial Coldwell this was run seperately, for more managable errors\n if not page: # Failed webdl handling\n return None\n\n soup = bs4.BeautifulSoup(page.text, 'lxml')\n parent_element = soup.find('ul', {'class': 'expanded-nav'})\n link_parent = parent_element.find('li')\n link_el = link_parent.find('a')\n link = link_el['href']\n\n return link", "def page22(self):\n self.token_mid = \\\n '8'\n result = request2201.GET('/Cars_Sample_App/cars.do' +\n '?query=' +\n self.token_query +\n '&mid=' +\n self.token_mid)\n\n return result", "def search(self, case_numbers=[], **kwargs):\n site = Site(self.place_id)\n logger.info(\n \"Executing search for {}\".format(self.place_id)\n )\n data = site.search(case_numbers=case_numbers)\n return data", "def page(self, request):\n draw = request.GET.get('draw', 0)\n length = int(request.GET.get('length', 5))\n start = int(request.GET.get('start', 0))\n order_column = int(request.GET.get('order[0][column]', 0))\n order_direction = request.GET.get('order[0][dir]', 'asc')\n search_keyword = request.GET.get('search[value]', '')\n raise NotImplementedError", "def parse_search_page(self, response):\n ###############################################################\n search_name_url_xpath = '//*[@id=\"dnn_dnnLEFTMENU_RadPanel1\"]/ul/li/div/ul/li[2]/a/@href'\n ###############################################################\n search_name_url = response.xpath(search_name_url_xpath).extract_first()\n yield scrapy.Request(response.urljoin(search_name_url), callback = self.parse_search_name_page)", "def go_search_results(self, driver, searchlink):\n self.go_and_assert(driver, searchlink, website)", "def __update_page_results(self):\n \n pages = []\n\n # Request id for pages associated to search term \n page_fields='page&fields=id,name,username,link'\n term = self.track[self.track_index]\n self.track_index += 1\n \n # Define url for http request to get pages id associated to search term \n page_request_url = 'https://graph.facebook.com/search?q=%s&type=%s&limit=%d&access_token=%s'%(term,page_fields,self.page_lim,self.access_token)\n \n while(True):\n # Try 100 times\n for i in range(100):\n \n page_response = requests.get(page_request_url)\n \n if 'error' in page_response.json() or page_response.status_code <> 200:\n print \"\\n !---- ERROR IN SEARCH REQUEST ----!\"\n print time.ctime()\n print \"Status Code: \", page_response.status_code\n print page_response.json()\n #raise StopIteration()\n time.sleep(1800) # Wait 30 minutes\n else:\n break\n \n page_json = page_response.json()\n pages = pages + page_json['data']\n time.sleep(5)\n \n if 'next' in page_json['paging']:\n page_request_url = page_json['paging']['next']\n else:\n break\n \n print \"Term: %s, Pages: %d\"%(term, len(pages))\n return pages", "def __get_page(self):\n params = {'query': self.query,\n 'corpname': self.subcorpus,\n 'start': self.__pagenum}\n s = get('http://korpus.juls.savba.sk:8080/manatee.ks/do_query', params=params)\n return 
s.text", "def getSearch(self, authenticationToken, guid):\r\n pass", "def parse_listing(keyword, place):\n url = \"https://www.paginegialle.it/ricerca/{0}/{1}\".format(keyword, place)\n print(\"retrieving \", url)\n\n headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'en-GB,en;q=0.9,en-US;q=0.8,ml;q=0.7',\n 'Cache-Control': 'max-age=0',\n 'Connection': 'keep-alive',\n 'Host': 'www.paginegialle.it',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'\n }\n try:\n response = requests.get(url, verify=False, headers=headers)\n print(\"parsing page\")\n if response.status_code == 200:\n parser = html.fromstring(response.text)\n # making links absolute\n base_url = \"https://www.paginegialle.it\"\n parser.make_links_absolute(base_url)\n\n XPATH_LISTINGS = \"//div[@class='pageContentWrapper active']//div[@class='col contentCol']\"\n listings = parser.xpath(XPATH_LISTINGS)\n elif response.status_code == 404:\n print(\"Could not find a location matching\", place)\n # no need to retry for non existing page\n else:\n print(\"Failed to process page exit with no results exit code: 213\")\n return []\n except:\n print(\"Failed to process page exit with no results exit code: 222\")\n return []\n\n XPATH_RESULTS = \"//div[@class=' container containerListato ']//span[@class='searchResNum']//text()\"\n raw_RESULTS = listings[0].xpath(XPATH_RESULTS)\n resultsn = ''.join(raw_RESULTS).strip().replace(\"risultati\",\"\") if raw_RESULTS else None\n print(\"results found for query {0} {1} - {2}\".format(keyword,place,resultsn))\n page_number = int(int(resultsn)/20) #20 is the number of result for single web page\n print(\"number of web page to parse: {0}\".format(page_number))\n\n scraped_results = []\n if page_number == 1 or page_number == 0:\n for results in listings:\n XPATH_BUSINESS_NAME = \".//h2[@class='fn itemTitle ']//text()\"\n XPATH_BUSSINESS_PAGE = \".//h2[@class='fn itemTitle ']//@href\"\n XPATH_TELEPHONE = \".//span[@class='tel ']//span[@itemprop='telephone']//text()\"\n XPATH_STREET = \".//span[@itemprop='streetAddress']//text()\"\n XPATH_LOCALITY = \".//span[@class='locality']//text()\"\n XPATH_REGION = \".//span[@class='region']//text()\"\n XPATH_ZIP_CODE = \".//span[@class='postal-code']//text()\"\n XPATH_DESCRIPTION = \".//p[@itemprop='description']//text()\"\n XPATH_OPENTIME = \".//span[@class='label']//text()\"\n\n raw_business_name = results.xpath(XPATH_BUSINESS_NAME)\n raw_business_telephone = results.xpath(XPATH_TELEPHONE)\n raw_business_page = results.xpath(XPATH_BUSSINESS_PAGE)\n raw_street = results.xpath(XPATH_STREET)\n raw_locality = results.xpath(XPATH_LOCALITY)\n raw_region = results.xpath(XPATH_REGION)\n raw_zip_code = results.xpath(XPATH_ZIP_CODE)\n raw_opentime = results.xpath(XPATH_OPENTIME)\n raw_description = results.xpath(XPATH_DESCRIPTION)\n\n raw_data = [raw_business_name,raw_business_telephone,raw_business_page,raw_street,raw_locality,raw_region,raw_zip_code,raw_opentime,raw_description]\n\n cleaned = []\n for grezz in raw_data:\n cleaned.append(''.join(grezz).strip() if grezz else None)\n \n business_details = {\n 'business_name': cleaned[0],\n 'telephone': cleaned[1],\n 'business_page': cleaned[2],\n 'street': cleaned[3],\n 'locality': cleaned[4],\n 'region': cleaned[5],\n 'zipcode': cleaned[6],\n 'openingTime': cleaned[7],\n 'Description': 
cleaned[8],\n }\n scraped_results.append(business_details)\n return scraped_results\n if page_number > 1: \n for retry in range(page_number):\n if retry == 0:\n for results in listings:\n XPATH_BUSINESS_NAME = \".//h2[@class='fn itemTitle ']//text()\"\n XPATH_BUSSINESS_PAGE = \".//h2[@class='fn itemTitle ']//@href\"\n XPATH_TELEPHONE = \".//span[@class='tel ']//span[@itemprop='telephone']//text()\"\n XPATH_STREET = \".//span[@itemprop='streetAddress']//text()\"\n XPATH_LOCALITY = \".//span[@class='locality']//text()\"\n XPATH_REGION = \".//span[@class='region']//text()\"\n XPATH_ZIP_CODE = \".//span[@class='postal-code']//text()\"\n XPATH_DESCRIPTION = \".//p[@itemprop='description']//text()\"\n XPATH_OPENTIME = \".//span[@class='label']//text()\"\n\n raw_business_name = results.xpath(XPATH_BUSINESS_NAME)\n raw_business_telephone = results.xpath(XPATH_TELEPHONE)\n raw_business_page = results.xpath(XPATH_BUSSINESS_PAGE)\n raw_street = results.xpath(XPATH_STREET)\n raw_locality = results.xpath(XPATH_LOCALITY)\n raw_region = results.xpath(XPATH_REGION)\n raw_zip_code = results.xpath(XPATH_ZIP_CODE)\n raw_opentime = results.xpath(XPATH_OPENTIME)\n raw_description = results.xpath(XPATH_DESCRIPTION)\n\n raw_data = [raw_business_name,raw_business_telephone,raw_business_page,raw_street,raw_locality,raw_region,raw_zip_code,raw_opentime,raw_description]\n\n cleaned = []\n for grezz in raw_data:\n cleaned.append(''.join(grezz).strip() if grezz else None)\n \n business_details = {\n 'business_name': cleaned[0],\n 'telephone': cleaned[1],\n 'business_page': cleaned[2],\n 'street': cleaned[3],\n 'locality': cleaned[4],\n 'region': cleaned[5],\n 'zipcode': cleaned[6],\n 'openingTime': cleaned[7],\n 'Description': cleaned[8],\n }\n scraped_results.append(business_details)\n else:\n time.sleep(5)\n try:\n url = \"https://www.paginegialle.it/ricerca/{0}/{1}/p-{2}\".format(keyword,place,retry)\n response = requests.get(url, verify=False, headers=headers)\n print(\"parsing page {0}\".format(retry))\n if response.status_code == 200:\n parser = html.fromstring(response.text)\n # making links absolute\n base_url = \"https://www.paginegialle.it\"\n parser.make_links_absolute(base_url)\n\n XPATH_LISTINGS = \"//div[@class='pageContentWrapper active']//div[@class='col contentCol']\"\n listings = parser.xpath(XPATH_LISTINGS)\n for results in listings:\n XPATH_BUSINESS_NAME = \".//h2[@class='fn itemTitle ']//text()\"\n XPATH_BUSSINESS_PAGE = \".//h2[@class='fn itemTitle ']//@href\"\n XPATH_TELEPHONE = \".//span[@class='tel ']//span[@itemprop='telephone']//text()\"\n XPATH_STREET = \".//span[@itemprop='streetAddress']//text()\"\n XPATH_LOCALITY = \".//span[@class='locality']//text()\"\n XPATH_REGION = \".//span[@class='region']//text()\"\n XPATH_ZIP_CODE = \".//span[@class='postal-code']//text()\"\n XPATH_DESCRIPTION = \".//p[@itemprop='description']//text()\"\n XPATH_OPENTIME = \".//span[@class='label']//text()\"\n\n raw_business_name = results.xpath(XPATH_BUSINESS_NAME)\n raw_business_telephone = results.xpath(XPATH_TELEPHONE)\n raw_business_page = results.xpath(XPATH_BUSSINESS_PAGE)\n raw_street = results.xpath(XPATH_STREET)\n raw_locality = results.xpath(XPATH_LOCALITY)\n raw_region = results.xpath(XPATH_REGION)\n raw_zip_code = results.xpath(XPATH_ZIP_CODE)\n raw_opentime = results.xpath(XPATH_OPENTIME)\n raw_description = results.xpath(XPATH_DESCRIPTION)\n\n raw_data = [raw_business_name,raw_business_telephone,raw_business_page,raw_street,raw_locality,raw_region,raw_zip_code,raw_opentime,raw_description]\n\n 
cleaned = []\n for grezz in raw_data:\n cleaned.append(''.join(grezz).strip() if grezz else None)\n \n business_details = {\n 'business_name': cleaned[0],\n 'telephone': cleaned[1],\n 'business_page': cleaned[2],\n 'street': cleaned[3],\n 'locality': cleaned[4],\n 'region': cleaned[5],\n 'zipcode': cleaned[6],\n 'openingTime': cleaned[7],\n 'Description': cleaned[8],\n }\n scraped_results.append(business_details)\n\n elif response.status_code == 404:\n print(\"Could not find a location matching\", place)\n # no need to retry for non existing page\n break\n else:\n print(\"Failed to process page number: {0}\".format(retry))\n return scraped_results\n\n except:\n print(\"Failed to process page number: {0}\".format(retry))\n return scraped_results \n return scraped_results", "def test_ajax_search(self):\r\n # first let's add a bookmark we can search on\r\n self._get_good_request()\r\n search_res = self.testapp.get(\r\n '/admin/results/google',\r\n headers={\r\n 'X-Requested-With': 'XMLHttpRequest',\r\n 'Accept': 'application/json'\r\n }\r\n )\r\n\r\n self.assertTrue(\r\n search_res.status == '200 OK',\r\n \"Status is 200: \" + search_res.status)\r\n\r\n self.assertTrue(\r\n 'my google desc' in search_res.body,\r\n \"We should find our description on the page: \" + search_res.body)\r\n\r\n # also check for our specific json bits\r\n self.assertTrue(\r\n 'success' in search_res.body,\r\n \"We should see a success bit in the json: \" + search_res.body)\r\n\r\n self.assertTrue(\r\n 'payload' in search_res.body,\r\n \"We should see a payload bit in the json: \" + search_res.body)\r\n\r\n self.assertTrue(\r\n 'message' in search_res.body,\r\n \"We should see a message bit in the json: \" + search_res.body)", "def parseSearchHtml(self):\n pass", "def parseSearchHtml(self):\n pass", "def full_search(pw, *arg, **kw):\n return pw.search(*arg, **kw)", "def scrap_results(self):\n # Find the table\n table = self.driver.find_element_by_xpath(results_table_path)\n\n found_links = []\n # For each row the table hase\n for row in table.find_elements_by_xpath(\".//tr\"):\n elements = row.find_elements_by_xpath(\".//td\")\n # If this row is not empty\n if len(elements) != 0:\n # Extract the link\n entity_link = elements[0].find_element_by_xpath(\".//a\").get_attribute(\"href\")\n found_links.append(entity_link)\n\n # Randomize the list of links so each time the order is different.\n shuffle(found_links)\n\n generic_data_found = []\n activity_data_found = []\n components_data_found = []\n components_alt_data_found = []\n historical_name_data_found = []\n historical_social_capital_data_found = []\n count = 0\n # For each link found\n for link in found_links:\n # Scrap the data from this entity\n gd, act, comp, hist_name, hist_c_s = self._scrap_single_entity(link)\n\n # Update the found data variables with the new data\n generic_data_found.append(gd)\n activity_data_found += act\n if len(comp) > 0 and \"total_miembros_patronado\" in comp[0]:\n components_alt_data_found += comp\n else:\n components_data_found += comp\n historical_name_data_found += hist_name\n historical_social_capital_data_found += hist_c_s\n\n # TODO: Remove this\n if count == 2:\n pass\n\n\n count += 1\n\n # Add data to the centralized search_result variable\n self.search_results.add_generic_data(generic_data_found)\n self.search_results.add_activity_data(activity_data_found)\n self.search_results.add_components_data(components_data_found)\n self.search_results.add_components_alt_data(components_alt_data_found)\n 
self.search_results.add_historical_names_data(historical_name_data_found)\n self.search_results.add_historical_social_capital_data(historical_social_capital_data_found)" ]
[ "0.68104583", "0.6787762", "0.6677038", "0.6644082", "0.6601781", "0.65885496", "0.6568652", "0.64640963", "0.64026564", "0.6390516", "0.63853854", "0.6381454", "0.6347054", "0.63387513", "0.63099885", "0.62858623", "0.62771076", "0.6269942", "0.6268018", "0.6258665", "0.62542", "0.62157404", "0.617878", "0.61767", "0.6169865", "0.61490476", "0.61427575", "0.61153054", "0.6111999", "0.6098294", "0.60931665", "0.6082391", "0.6066026", "0.6061658", "0.6059402", "0.60581815", "0.60443604", "0.60402894", "0.60322213", "0.60284907", "0.6024509", "0.60224307", "0.6022422", "0.5998657", "0.59979564", "0.5993939", "0.59889734", "0.5978325", "0.5976037", "0.5971877", "0.59622204", "0.5954433", "0.5953469", "0.5950101", "0.5948719", "0.59444153", "0.59419835", "0.59356374", "0.59285086", "0.592195", "0.5921746", "0.59190226", "0.59112024", "0.59082943", "0.5905524", "0.5903544", "0.58996624", "0.589855", "0.5897873", "0.5889121", "0.587223", "0.5872111", "0.5860972", "0.58523315", "0.585006", "0.58498377", "0.5825842", "0.58248913", "0.5823733", "0.58236086", "0.58209336", "0.5814681", "0.58010453", "0.57936454", "0.5780181", "0.577919", "0.57757795", "0.5770859", "0.57660943", "0.5757956", "0.5756518", "0.5754516", "0.5752634", "0.5744713", "0.57431287", "0.57385397", "0.5735466", "0.5733074", "0.5733074", "0.57320935", "0.572518" ]
0.0
-1
Extract Raw text from PDF
def extract_text(path, pages): out = [] with open(path, 'rb') as file: pdftotext_string = pdftotext.PDF(file) for i in pages: out.append(pdftotext_string[i - 1]) return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_pdf(path):\n\n #only reading from pdf files\n\n text = textract.process(filename = path, encoding = \"ascii\")\n\n\n text.replace(\"\\n\", \" \")\n text.replace(\"\\t\", \" \")\n text.replace(\"\\r\", \" \")\n filter(lambda x: x in set(string.printable), text)\n\n return text", "def parsepdf(intext): # type: (str) -> str\n\n pdfbinarydata = base64.b64decode(intext.strip())\n pdfFileObj = io.BytesIO()\n pdfFileObj.write(pdfbinarydata)\n pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n extractedText = ''\n for i in range(0, pdfReader.numPages):\n pageObj = pdfReader.getPage(i)\n extractedText = extractedText + pageObj.extractText()\n\n return extractedText.strip()", "def get_data_from_pdf(self, regex):\n match = re.search(regex, self.page_text)\n return match.group(0).replace(\" \", \"\").replace(\"\\n\", \"\")", "def pdf_to_txt(full_path):\n file = open(full_path,'rb')\n extracted_text = parser.from_buffer(file)\n return extracted_text['content']", "def extract_text_from_pdf(file):\n\n return RegexpTokenizer(r'\\w+').tokenize(parser.from_file(file)['content'])", "def extract_text(filename):\n\tpdfFileObj = open(filename, \"rb\")\n\tpdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n\n\tmytext = \"\"\n\n\tfor pageNum in range(pdfReader.numPages):\n\t\tpageObj = pdfReader.getPage(pageNum)\n\t\tmytext += pageObj.extractText()\n\n\tpdfFileObj.close()\n\n\treturn mytext", "def textify(read_pdf,spage,epage):\n\n page_text = \"\"\n for page in range(spage, epage):\n page_content = read_pdf.getPage(page)\n page_text += page_content.extractText()\n\n full_text = page_text #.encode('utf-8')\n return full_text", "def get_text_from_pdf(self, path):\n os.system(\"pdftotext {} tmp.txt > /dev/null\".format(path))\n with open('tmp.txt') as f:\n self.text = f.read()\n os.remove('tmp.txt')", "def readFile(self):\n with pdfplumber.open(self.path) as pdf:\n first_page = pdf.pages[0]\n text = first_page.extract_text()\n text = text.split('\\n')\n return processText(text)", "def get_text(self):\r\n if os.path.isfile(self.destination):\r\n pdf = self.read_from_text()\r\n else:\r\n pdf = self.convert_to_text\r\n self.save_text_to_file(pdf)\r\n return pdf", "def convert_pdf_to_txt(pdf):\n stdout = subprocess.Popen([\"pdftotext\", \"-q\", pdf, \"-\"],\n stdout=subprocess.PIPE).communicate()[0]\n return stdout", "def read_from_text(self) -> str:\r\n with open(self.destination, 'r', encoding='utf8') as f:\r\n pdf = f.read()\r\n return pdf", "def raw_text(self):\n\t\t\n\t\t #eliminating more headers\n\t\traw_text = re.sub(r\".*OPERATIONS O[PF].*\",r\"\",self.doc)\n\t\traw_text = re.sub(r\"Page \\d+\",r\"\",raw_text)\n\t\traw_text = re.sub(r\".*B[lL]OCK.*\",r\"\",raw_text)\n\t\traw_text = re.sub(r\".*WEST GULF.*\",r\"\",raw_text)\n\t\traw_text = re.sub(r\".*NAVAL FORCES ON.*\",r\"\",raw_text)\n\t\traw_text = re.sub(r\"\\s\",r\" \", raw_text) #eliminating tabs etc. 
\t \t \n\t\treturn raw_text", "def pdf_link_to_text(pdf_attachment):\n print(\"Downloading \" + str(pdf_attachment))\n filename = download_pdf_file(pdf_attachment)\n print(\"Extracting text from \" + str(filename))\n pdf_text = convert_pdf_to_text(filename)\n os.remove(filename)\n return pdf_text", "def extract(key, path_pdf):\n\n path_tmp_pdf = extract_first_page(path_pdf)\n\n # extract all text from first page\n raw_text = extract_text(path_tmp_pdf)\n\n # extract abstract from whole page and replace hyphens etc.\n abstract = extract_abstract(raw_text)\n\n # something went wrong when abstract is longer than 1500 chars\n if len(abstract) > MAX_LEN:\n print('{}: Abstract is too long.'.format(path_pdf))\n\n if not abstract:\n print('{}: Could not extract abstract.'.format(path_pdf))\n\n # clean up temp file\n os.unlink(path_tmp_pdf)\n\n # TODO: Fix this return object\n out = {'@key': key, 'abstract': abstract}\n\n return out", "def pdf_to_text(file_object):\n pdfData = file_object.read()\n tf = tempfile.NamedTemporaryFile()\n tf.write(pdfData)\n tf.seek(0)\n outputTf = tempfile.NamedTemporaryFile()\n\n if len(pdfData) > 0:\n out, err = subprocess.Popen([\"pdftotext\", \"-layout\", tf.name, outputTf.name ]).communicate()\n return outputTf.read()\n else:\n return None", "def test_read_text_of_non_indexed_pdf_without_ocr(pdf_path):\n pdf = PdfReader(path=pdf_path)\n assert pdf.read_text(allow_ocr=False) is None", "def extract_and_process(input_dir, pdf_path, json_output):\n\tprint('Extracting text from: ', pdf_path)\n\toutput_dir = input_dir + \"/output\"\n\ttry:\n\t\t# Extract PDF to HTML format\n\t\textracted_text = txt_ext.extract_pdf_to_html(pdf_path)\n\t\t# Write raw HTML\n\t\t#pre_proc.create_text_file(output_dir + \"/raw_\" + path_leaf(pdf_path) + \".html\", extracted_text)\n\t\t\n\t\tprint(\"Extraction finished: \"+ pdf_path + \", starting processing\")\n\t\tprocess(extracted_text, output_dir, path_leaf(pdf_path), json_output)\n\n\texcept PDFSyntaxError:\n\t\tprint(\"PDFSyntaxError: Is this really a PDF? 
\", pdf_path)\n\texcept PDFTextExtractionNotAllowed as e:\n\t\tprint(e)", "def test_read_text(pdf_path):\n pdf_reader = PdfReader(path=pdf_path)\n text = pdf_reader.ocr_text()\n\n # We hard code this comparison to keep track of all changes to this metric\n assert pdf_reader.mean_confidence == 89\n assert pdf_reader.page_confidences == [86, 91]\n\n # Check if we have two pages seperated by pagebreaks\n assert len(text.split('\\f')) == 2\n\n # The same content can be extracted from the pages property\n assert '\\f'.join(pdf_reader.pages) == text\n\n # Content on the first page (important that this is at the beginning)\n assert 'Norwegian University of Science and Technology' in text[:50]\n\n # Content on second page (important that this is at the end)\n assert 'two requirements' in text[-50:]\n\n # The double-f in affine is hard for bad OCR algorithms\n assert 'affine' in text", "def extract_text(fname):\n\n laparams = pdfminer.layout.LAParams()\n for param in ('all_texts', 'detect_vertical', 'word_margin', 'char_margin', 'line_margin', 'boxes_flow'):\n paramv = locals().get(param, None)\n if paramv is not None:\n setattr(laparams, param, paramv)\n\n # send output to a string stream\n outfp = io.StringIO()\n\n with open(fname, 'rb') as fp:\n pdfminer.high_level.extract_text_to_fp(fp, outfp=outfp, codec='utf-8',\n laparams=laparams, pages=0)\n\n return outfp.getvalue()", "def extract_text(infile):\n # Get text from mudraw\n text = subprocess.check_output(['mudraw', '-F', 'txt', infile])\n\n # Cleanup raw text\n match = re.search(\n r'.*?Activity \\/ Remarks(?P<table1>.*?)Activities not shown on the ' +\n r'DABS Chart Side:.*?Activity \\/ Remarks(?P<table2>.*?)For detailed ' +\n r'information regarding the DABS',\n text,\n re.MULTILINE | re.DOTALL)\n if not match:\n raise ExtractionError('Could not extract text from PDF.')\n false_or_none_string = lambda x: bool(x) and x.lower() != 'none'\n data = '\\n\\n\\n'.join(match.groups())\n raw_parts = re.sub(r'\\n[ \\t]+\\n', '\\n\\n', data).split('\\n\\n\\n')\n parts = filter(false_or_none_string, map(lambda x: x.strip(), raw_parts))\n\n # Write CSV\n headers = (\n b'Firing-Nr\\nD-/R-Area\\nNOTAM-Nr',\n b'Validity UTC',\n b'Lower Limit\\nAMSL or FL',\n b'Upper Limit\\nAMSL or FL',\n b'Location',\n b'Center Point',\n b'Covering Radius',\n b'Activity / Remarks',\n )\n rows = []\n for i, part in enumerate(parts):\n # Regexes\n multiple_newlines_re = re.compile(r'\\n+')\n height_re = re.compile(r'(GND|[0-9]+m \\/ [0-9]+ft|FL[0-9]{2,3}|REF AIP)')\n center_radius_re = re.compile(r'([0-9]{6}N [0-9]{7}E)\\s+?(.*?NM)')\n\n # Separate columns (warning: hackish code ahead!)\n row = {}\n step1 = re.split(r'([0-2][0-9][0-6][0-9] - [0-2][0-9][0-6][0-9])', part)\n row['nr'] = step1[0].strip()\n timestring = '\\n'.join(step1[1:-1])\n row['validity'] = multiple_newlines_re.sub('\\n', timestring)\n step2 = filter(None, height_re.split(step1[-1].strip()))\n row['lower'] = step2[0]\n row['upper'] = step2[2]\n step3 = filter(None, center_radius_re.split(step2[-1].strip()))\n row['location'] = step3[0].strip()\n row['center'] = step3[1].strip()\n row['radius'] = step3[2].strip()\n row['activity'] = multiple_newlines_re.sub('\\n', step3[3].strip())\n\n # Add to list of rows\n rows.append((\n row['nr'].encode('utf8'),\n row['validity'].encode('utf8'),\n row['lower'].encode('utf8'),\n row['upper'].encode('utf8'),\n row['location'].encode('utf8'),\n row['center'].encode('utf8'),\n row['radius'].encode('utf8'),\n row['activity'].encode('utf8'),\n ))\n\n return 
tablib.Dataset(*rows, headers=headers)", "def pdf_to_text(self, f):\n cmd = [\"pdftohtml\", \"-zoom\", \"1.35\", \"-xml\", \"-stdout\", f.name]\n code, stdout, stderr = self.shell(cmd)\n if code > 0:\n raise ValueError(stderr)\n return stdout.decode('utf-8')", "def convert_pdf_to_text(pdf_path):\n process_id = os.getpid()\n resource_manager = PDFResourceManager()\n output = StringIO.StringIO()\n laparams = LAParams(detect_vertical=True)\n device = TextConverter(\n resource_manager,\n output,\n codec='utf-8',\n laparams=laparams\n )\n interpreter = PDFPageInterpreter(resource_manager, device)\n file_handler = file(pdf_path, 'rb')\n pages = PDFPage.get_pages(file_handler)\n\n for idx, page in enumerate(pages):\n print(\"Page \" + str(idx + 1), end='\\r')\n sys.stdout.flush()\n interpreter.process_page(page)\n print()\n\n data = output.getvalue()\n data = data.replace('\\n', ' ')\n data = data.replace('\\t', ' ')\n data = data.replace('\\r', ' ')\n data = data.replace('\\x0c', ' ')\n\n return data", "def get_text_hook(raw):\n soup = bs4.BeautifulSoup(quopri.decodestring(raw), features=\"lxml\")\n return soup.text", "def test_extract_pdf_prev():\n\n test_pdf_path = 'tests/files/research/fea48178ffac3a42035ed27d6e2b897cb570cf13.pdf'\n text = pdf_util.extract_pdf_text_prev(test_pdf_path)\n\n assert text\n assert \"Yoshiyuki\" in text", "def digital_text(file_path):\n doc = fitz.open(file_path)\n page_count = doc.pageCount\n print(\"\\n number of pages : \",page_count)\n total_text = \"\"\n try:\n for page_num in range(page_count):\n p = doc.loadPage(page_num)\n page_text = p.getText()\n total_text += page_text\n print(\"\\n number of pages extracted : \", (page_count))\n except Exception as e:\n print(\"\\n Error in digital_text : \", traceback.format_exc(()))\n return total_text", "def mo_parse_pdf(self, filepath):\n\n text = textract.process(filepath, encoding='utf-8')\n text = text.decode('utf-8')\n\n if 'PRESSURE CALIBRATION DATA' in text:\n self.mo_parse_p(filepath)\n\n elif 'TEMPERATURE CALIBRATION DATA' or 'CONDUCTIVITY CALIBRATION DATA' in text:\n self.mo_parse_ts(text)\n\n else:\n pass", "def test_read_text_of_text_indexed_pdf(allow_ocr, monkeypatch):\n # This is a PDF which contains indexed text\n pdf_path = Path(__file__).parent / 'data' / 'matmod_exam_des_2017.pdf'\n\n # So the OCR method should never be called\n monkeypatch.delattr('examiner.pdf.PdfReader.ocr_text')\n\n # Now we read the indexed text\n pdf = PdfReader(path=pdf_path)\n text = pdf.read_text(allow_ocr=allow_ocr)\n\n # Ensure unicode string\n assert isinstance(text, str)\n\n # Check content\n assert 'Rottman' in text\n assert 'population model' in text\n assert 'this is not in the exam' not in text", "def detect_text(path):\r\n from google.cloud import vision\r\n import io\r\n client = vision.ImageAnnotatorClient()\r\n\r\n with io.open(path, 'rb') as image_file:\r\n\r\n content = image_file.read()\r\n image = vision.types.Image(content=content)\r\n response = client.document_text_detection(image = image)\r\n docText = response.full_text_annotation.text\r\n docText = docText.replace(\"\\n\", \"\")\r\n return (docText)", "def _extract_kiss_text(self, raw_slice):\n self.text = self.frame[raw_slice + 3:]", "def convert_to_text(self) -> str:\r\n import pdftotext\r\n with open(self.pdf_path, \"rb\") as f:\r\n pdf = pdftotext.PDF(f)\r\n pdf = \"\\n\\n\".join(pdf)\r\n return pdf", "def get_parsed_text(blob):\n return blob.parse()", "def text_preprocessing_pdf(self,p):\n 
#remover_end_paragraphs=np.vectorize(self.remove_end_paragraphs,otypes=[str])\n cleaner=np.vectorize(self.remove_non_alpha,otypes=[str])\n cut_text=np.vectorize(self.cut_text,otypes=[str])\n cut_text_raw=np.vectorize(self.cut_text_raw,otypes=[str])\n assert len(self.parser)==len(self.parser_raw), \"Length of the treated sentence treated list does not match length of raw text list: {} / {}\".format(len(self.parser),len(self.parser_raw))\n cut_text_raw(p)\n p=cleaner(p)\n cut_text(p)\n return p", "def do_single_file_preprocess(pdf_file):", "def test_read_text_of_non_indexed_pdf_with_ocr(monkeypatch, pdf_path):\n monkeypatch.setattr(PdfReader, 'ocr_text', lambda self: 'content')\n pdf = PdfReader(path=pdf_path)\n assert pdf.read_text(allow_ocr=True) == 'content'", "def obtain_text():\n pass", "def process_pdf(path):\r\n str = \"\"\r\n try:\r\n pages = layout_scanner.get_pages(path) \r\n i = 0\r\n l = len(pages)\r\n while i < l: \r\n str += pages[i]\r\n i += 1\r\n except Exception, e:\r\n return g_error_template % e, \"\" \r\n \r\n return \"\", str", "def read_pdf_file(file):\n return pdftotext.PDF(file)", "def extractText(self, filename):\n file_path = os.path.join(folder_upload, filename)\n file_text = self.textExtractor.get_text(file_path)\n return file_text", "def from_pdf(path):\n raw_regexes = [\n r\"\"\"<prism:doi>(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)</prism:doi>\"\"\",\n r\"\"\"[\"'](?:doi|DOI):(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)[\"']\"\"\",\n r\"\"\"URI\\s*\\(https?://doi.org/(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\\s*>\"\"\",\n r\"\"\"URI\\s*\\((?:https?://)?www.nature.com/doifinder/(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\\s*>\"\"\",\n # This one works for some ACIE papers, but is too risky. It matches\n # against DOIs of cited papers too. Better to use WPS-ARTICLEDOI.\n # r\"\"\"/URI\\(https?://(?:dx)?.doi.org/(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\"\"\",\n r\"\"\"/WPS-ARTICLEDOI\\s*\\((10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\"\"\",\n r\"\"\"\\((?:doi|DOI):\\s*(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\"\"\",\n r\"\"\"<rdf:li.+>(?:doi|DOI):(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)</rdf:li>\"\"\",\n ]\n regexes = [re.compile(regex) for regex in raw_regexes]\n class _DOIFound(Exception):\n pass\n\n p = Path(path)\n if not (p.exists() or p.is_file()):\n return _error(f\"from_pdf: invalid path '{p}' given\")\n\n strings = subprocess.Popen([\"strings\", p], stdout=subprocess.PIPE)\n grep = subprocess.Popen([\"grep\", \"-i\", \"doi\"], stdin=strings.stdout, stdout=subprocess.PIPE)\n try:\n for line in grep.stdout:\n line = line.decode(_g.gpe).strip()\n for regex in regexes:\n match = regex.search(line)\n if match:\n raise _DOIFound(match.group(1))\n except _DOIFound as e:\n doi = e.args[0]\n # Prune away any extra parentheses at the end.\n nopen = doi.count('(')\n nclose = doi.count(')')\n if nopen != nclose:\n doi = doi.rsplit(')', maxsplit=(nclose - nopen))[0]\n # Report success.\n return DOI(doi)\n else:\n return _error(f\"from_pdf: could not find DOI from '{p}'\")", "def pdf2text(sourcePath, destinationPath, logDir):\n TextRepresentation.logger.info(\"Converting pdf document %s\" % sourcePath)\n\n cmdList = ['pdftotext', '-raw', '-layout', '-enc', 'UTF-8', '-eol', 'unix', '-nopgbrk']\n convertString = \"Converting pdf: \" + sourcePath + \" into text.\"\n \n TextRepresentation.logger.info(str(cmdList) + \"\\n\" + sourcePath + \"\\n\" + destinationPath)\n\n retCode, stdout, stderr = AsrtSubprocess.execute(cmdList + [sourcePath, destinationPath], logDir)\n \n if retCode == 0:\n 
TextRepresentation.logger.info(\"Success: \" + convertString)\n else: \n TextRepresentation.logger.critical(\"Failure: \" + convertString)\n raise Exception(\"Error converting pdf: \" + sourcePath)", "def extractText(text):\n soup = BeautifulSoup(text, 'html.parser')\n for code in soup.find_all('code'):\n code.decompose()\n return soup.get_text()", "def pdf2text(path: str, pages = set()) -> str:\n \n rsrcmgr = PDFResourceManager()\n retstr = StringIO()\n codec = 'utf-8'\n laparams = LAParams(\n char_margin=100,\n line_margin=0.25,\n word_margin=0.1,\n boxes_flow = 0.9)\n device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)\n fp = open(path, 'rb')\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n password = \"\"\n maxpages = 0\n caching = True\n pagenos=pages\n\n for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password,caching=caching, check_extractable=True):\n interpreter.process_page(page)\n\n text = retstr.getvalue()\n\n fp.close()\n device.close()\n retstr.close()\n return text", "def get_full_text_from_source(self):\n extension = self.get_doc_file_extension()\n\n if extension in ('txt', ''):\n # string = unicode(string)\n return self.doc_file.read().decode(\"utf-8\")\n elif extension == 'docx':\n docx_document = Docx(BytesIO(self.doc_file.read()))\n return \"\\n\".join(p.text for p in docx_document.paragraphs)\n elif extension == 'pdf':\n raise NotImplementedError()\n else:\n raise ValueError(\"file_format not supported\")", "def _pdf_to_txt(file_path, dst_dir, file_name):\n if file_name is None:\n file_name = os.path.split(file_path)[1]\n file_dst = os.path.join(dst_dir, re.sub(r'\\.pdf$', '.txt', file_name))\n return subprocess.call([\"pdftotext\", \"-layout\", file_path, file_dst])", "def get_document_text(document_id, session=konfuzio_session()):\n url = get_document_api_details_url(document_id)\n r = retry_get(session, url)\n text = r.json()['text']\n if text is None:\n logger.warning(f'Document with ID {document_id} does not contain any text, check OCR status.')\n else:\n logger.info(f'Document with ID {document_id} contains {len(text)} characters.')\n\n return text", "def extract_text(fpath):\n try:\n files = {'document': open(fpath, 'rb')}\n headers = {'Content-type': 'application/octet-stream'}\n response = requests.put(TIKA_URL, files=files, headers=headers)\n if not response:\n logging.error('Status: %d', response.status_code)\n return None\n return response.content\n except Exception as error: # pylint: disable=broad-except\n logging.error(error)\n return None", "def extract_text(self, record):\n # type: (Element) -> str\n cdm_struc = Fields.cdm_structural_elements\n structure_el = record.find(cdm_struc['compound_object_container'])\n pages_el = structure_el.iterfind('.//' + cdm_struc['compound_object_page'])\n fulltext = ''\n for page in pages_el:\n page_el = page.find(cdm_struc['compound_object_page_text'])\n if page_el is not None:\n if page_el.text is not None:\n page_text = Utils.correct_text_encoding(page_el.text)\n fulltext += page_text\n return fulltext", "def _extract_raw_url(self) -> str:\n soup = BeautifulSoup(self.content.content, self._parser)\n pdf_tag = soup.select_one(self.pdf_tag_selector)\n if pdf_tag is None:\n raise PdfTagNotFoundException(f\"No pdf tag was found in the given content \"\n f\"with the selector: {self.pdf_tag_selector}\")\n raw_url = pdf_tag.attrs.get(self.pdf_tag_attr)\n if raw_url is None:\n raise PdfUrlNotFoundException(f\"No pdf url was found in the pdf tag: {pdf_tag.get_text()} \"\n 
f\"with the attr {self.pdf_tag_attr}\")\n return raw_url", "def process_pdf(filename, qualies_only=False):\n if filename.endswith('.txt'):\n f = open(filename)\n text = f.read()\n f.close()\n else:\n text = subprocess.check_output([\"pdftotext\", \"-layout\",\n filename, \"-\"]).decode('utf-8')\n\n print(\"Processing {}...\".format(filename))\n\n pages = text.split(chr(12))\n print (\"{} Pages\".format(len(pages)))\n md = []\n qd = []\n for p in pages:\n if ('MAIN DRAW SINGLES' in p or 'Singles Championship' in p\n or 'Ladies\\' Singles' in p):\n md += [p]\n elif ('QUALIFYING SINGLES' in p or 'Qualifying Singles' in p\n or 'Qualifying Ladies\\' Singles' in p):\n qd += [p]\n elif ('Qualifiers' in p and not 'Doubles' in p):\n qd += [p]\n\n md_result = None\n qd_result = None\n\n meta = None\n if md and not qualies_only:\n md_result = drawsheet_process(chr(12).join(md))\n meta = md_result[2]\n\n # copy the metadata to the quaily draw if possible\n if qd:\n qd_result = drawsheet_process(chr(12).join(qd), meta, True)\n\n return (md_result, qd_result)", "def parseword(intext): # type: (str) -> str\n\n wordbinarydata = base64.b64decode(intext.strip())\n wordFileObj = io.BytesIO()\n wordFileObj.write(wordbinarydata)\n theword = docx.Document(wordFileObj)\n extractedText = ''\n for para in theword.paragraphs:\n extractedText = extractedText + para.text + '\\n'\n\n return extractedText", "def extract_page_text(html):\n soup = bs4.BeautifulSoup(html)\n\n # Remove <script/> and <style/> content\n for script in soup([\"script\", \"style\"]):\n script.extract()\n\n text = soup.get_text()\n\n # Strip leading and trailing whitespace from each line, then join all the\n # non-empty lines together.\n lines = (line.strip() for line in text.splitlines())\n text = '\\n'.join(line for line in lines if line)\n\n return text", "def preprocess(self, text):\r\n return text", "def get_sample_text(self, sample_file):\n text = get_text_pdf(sample_file)\n self.ui.plainTextEdit.appendPlainText(text)", "def pdf_preprocess(pdf):\n path = Path(pdf)\n if path.exists():\n # a filepath is provided, read and encode\n with path.open(\"rb\") as f:\n return base64.b64encode(f.read()).decode(\"utf-8\")\n else:\n # assume pdf is already b64 encoded\n return pdf", "def pdf_to_test(file_name):\n #Opening, reading and parsing a pdf file to string\n pdfFileObj = open(file_name, 'rb')\n pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n pdf_string = pdfReader.getPage(0).extractText()\n \n #Find the RechnungsNr.\n start_of_RN = pdf_string.find(\"No.Invoice Date\") + len(\"No.Invoice Date\")\n rechnungs_nr = pdf_string[start_of_RN:start_of_RN+7]\n \n #Find the address\n start_of_address = pdf_string.find(\"Invoice Address\") + len(\"Invoice Address\")\n end_of_address = pdf_string.find(\"Payment Terms:\")\n address = pdf_string[start_of_address:end_of_address]\n \n #Liefermonat commenrs\n start_of_contract = pdf_string.find(\"Company Name / Line of business\") + len(\"Company Name / Line of business\")\n end_of_contract = pdf_string.find(\"Summary of Charges\")\n contract = pdf_string[start_of_contract:end_of_contract]\n \n #Nettobetrag - read base charge\n start_of_netto = pdf_string.find(\"Base Charges\") + len(\"Base Charges\")\n end_of_netto = pdf_string.find(\"Click Charges - Color\")\n nettobetrag = pdf_string[start_of_netto:end_of_netto]\n \n pdfFileObj.close()\n \n return pdfFileObj.name, rechnungs_nr, address, contract, nettobetrag", "def get_text(self):\n txt = self.lang.tool.image_to_string(\n self.image,\n lang=self.lang,\n 
builder=pyocr.builders.TextBuilder()\n )\n return txt", "def process_text(document):\n return preprocess_string(document,\n filters=[strip_tags, strip_punctuation,\n strip_multiple_whitespaces,\n strip_numeric, remove_stopwords,\n strip_short]\n )", "def raw_text(self):\n return self._raw_text", "def extract(self, document):\n raise NotImplementedError('FeatureExtractorBase:extract(self, text) is not defined')", "def load_pdf(self, env=\"default\", debug=()):\n os.makedirs(\"txt\", exist_ok=True)\n if env is \"default\": # default python path\n call([executable,\n os.path.join(f\"{exec_prefix}\", \"Scripts\", \"pdf2txt.py\"),\n os.path.join(\"pdf\", f\"{self.pdf_filename}\"),\n os.path.join(f\"-otxt\", f\"{self.txt_filename}\")])\n if env is \"venv\": # virtual environment\n call([os.path.join(\"venv\", \"Scripts\", \"python.exe\"),\n os.path.join(\"venv\", \"Scripts\", \"pdf2txt.py\"),\n os.path.join(\"pdf\", f\"{self.pdf_filename}\"),\n os.path.join(f\"-otxt\", f\"{self.txt_filename}\")])\n with open(os.path.join(\"txt\", f\"{self.txt_filename}\"), \"r\", encoding=\"utf-8\") as file:\n self.paragraphs = [paragraph.rstrip('\\n') for paragraph in file]\n os.remove(os.path.join(\"txt\", f\"{self.txt_filename}\"))\n if debug:\n for counter, paragraph in enumerate(self.paragraphs):\n try:\n if int(debug[0]) < counter < int(debug[1]):\n print(counter, paragraph)\n except TypeError:\n print(\"Debug must be a (x,y) touple.\")", "def extractText(postSoup):\n for tag in postSoup.findAll(True):\n if tag.name in (\"code\"):\n tag.extract()\n else:\n tag.hidden=True\n\n return postSoup.renderContents()", "def plain_text_(self):\n return self.content.decode(self.encoding)", "def detect_document(path):\n from google.cloud import vision\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.document_text_detection(image=image)\n\n for page in response.full_text_annotation.pages:\n for block in page.blocks:\n #print('\\nBlock confidence: {}\\n'.format(block.confidence))\n for paragraph in block.paragraphs:\n for word in paragraph.words:\n word_text = ''.join([symbol.text for symbol in word.symbols])\n text.append(word_text.encode('utf-8'))\n #print(word_text)", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def get_corpus(self):\n rsrcmgr = PDFResourceManager()\n retstr = StringIO()\n codec = 'utf-8'\n laparams = LAParams()\n device = TextConverter(\n rsrcmgr, retstr, codec=codec, laparams=laparams)\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n maxpages = 0\n caching = True\n pagenos = set()\n pages = PDFPage.get_pages(\n self.stream_in,\n pagenos,\n maxpages=maxpages,\n password=\"\",\n caching=caching,\n check_extractable=True\n )\n for page in pages:\n interpreter.process_page(page)\n device.close()\n text = retstr.getvalue()\n retstr.close()\n return text", "def detect_document(path):\n from google.cloud import vision\n import io\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n image = vision.Image(content=content)\n\n response = client.document_text_detection(image=image)\n if response.error.message:\n raise Exception('{}'.format(response.error.message))\n return response.full_text_annotation.text", "def extractText(html_code):\n html_tree = html.fromstring(html_code)\n chapter_list = html_tree.find_class(\"chapter\")\n chapter_text = 
chapter_list[0].text_content()\n return chapter_text", "def ocr_core(filename):\n \n text = pytesseract.image_to_string(Image.open(filename), lang=\"rus\") # We'll use Pillow's Image class to open the image and pytesseract to detect the string in the image\n return text", "def get_data_from_nonformat_text():\n pass", "def __init__(self, text, pdf=None):\n if pdf == None and text != None:\n self.text = process_text(text)\n return\n elif pdf == None and text == None:\n raise Exception('EULA initialization failed')\n \n output_string = StringIO()\n readFile = open(pdf, 'rb')\n with readFile as in_file:\n parser = PDFParser(in_file)\n doc = PDFDocument(parser)\n rsrcmgr = PDFResourceManager()\n device = TextConverter(rsrcmgr, output_string, laparams=LAParams())\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n for page in PDFPage.create_pages(doc):\n interpreter.process_page(page)\n readFile.close()\n\n self.text = process_text(output_string.getvalue())", "def test_force_ocr(monkeypatch):\n monkeypatch.setattr(PdfReader, 'ocr_text', lambda self: 'content')\n pdf = PdfReader(path='/')\n assert pdf.read_text(allow_ocr=True, force_ocr=True) == 'content'", "def get_document_details(document_id, session=konfuzio_session()):\n url = get_document_api_details_url(document_id, include_extractions=False, extra_fields='bbox,hocr')\n r = retry_get(session, url)\n data = r.json()\n text = data[\"text\"]\n annotations = data[\"annotations\"]\n sections = data[\"sections\"]\n if text is None:\n logger.warning(f'Document with ID {document_id} does not contain any text, check OCR status.')\n else:\n logger.info(f'Document with ID {document_id} contains {len(text)} characters '\n f'and {len(annotations)} annotations in {len(sections)} sections.')\n\n return data", "def format_ocr_text(self, page):\n \n #read out of the text file that tesseract made\n ocr_text = open(self.ocr_text, 'r')\n \n # write into this file\n djvu_text = open( self.djvu_text, 'w' )\n \n text = \"(page 0 0 1 1\\n\"\n \n self.out_text.write('\\n## Page %d ###\\n\\n' % page )\n \n for line in ocr_text:\n \n #write to the human readable file\n self.out_text.write(line)\n \n # add each line of text\n # escaping \" to \\\" as we go\n text += '(line 0 0 1 1 \"%s\")\\n' % line.replace('\"', r'\\\"').strip()\n \n text += \")\\n\"\n \n djvu_text.write( text )\n \n ocr_text.close()\n djvu_text.close()", "def get_text(self):", "def scrape(data):\n result = {}\n xml_str = scraperwiki.pdftoxml(data)\n root = xml.etree.ElementTree.fromstring(xml_str)\n page_id = 0\n for page in root:\n page_id += 1\n for text in page.iter(tag=\"text\"):\n if text.get(\"font\") == \"3\":\n text_id = (page_id, text.get(\"top\"))\n row = result.get(text_id, \"\")\n if row and len(row) < 60:\n row = row + \" \" * (60 - len(row))\n result[text_id] = row + text.text\n return result", "def _get_text(raw_html):\n bs = BeautifulSoup(raw_html)\n text_nodes = bs.find_all(_is_text_tag)\n text_elements = [_get_child_text(node) for node in text_nodes]\n return ' '.join(chain(*chain(*text_elements)))", "def pdf_miner_extract(pdf_file, password='', pages=0):\n pdf_resource_manager = PDFResourceManager()\n output_stream = StringIO()\n device = TextConverter(pdf_resource_manager, output_stream,\n laparams=LAParams(char_margin=0.8, detect_vertical=False))\n file_stream = open(pdf_file, 'rb')\n interpreter = PDFPageInterpreter(pdf_resource_manager, device)\n pages_set = []\n for page in PDFPage.get_pages(file_stream, set(), pages, password):\n interpreter.process_page(page)\n 
pages_set.append(output_stream.getvalue())\n output_stream.truncate(0)\n file_stream.close()\n device.close()\n output_stream.close()\n return pages_set", "def get_text(data_path):\n\tp = get_full_path(data_path)\n\tf = open(p, 'r')\n\tcontent = f.read()\n\tf.close()\n\treturn content", "def do(self, pdf_path: str) -> (str, list, list):\n hocr_list, images = self.processPdf(pdf_path)\n hocr_final = self.combineHocr(hocr_list, pdf_path.split('/')[-1])\n return hocr_final, hocr_list, images", "def parse_pdf(url):\n pdf_data = urllib2.urlopen(Request(url)).read()\n # Cast to StringIO object\n from StringIO import StringIO\n memory_file = StringIO(pdf_data)\n\n # Create a PDF parser object associated with the StringIO object\n parser = PDFParser(memory_file)\n\n # Create a PDF document object that stores the document structure\n document = PDFDocument(parser)\n\n # Define parameters to the PDF device object\n rsrcmgr = PDFResourceManager()\n retstr = StringIO()\n laparams = LAParams()\n pageno = 1\n codec = 'utf-8'\n\n # Create a PDF device object\n device = TextConverter(rsrcmgr, retstr, codec=codec, pageno=pageno,\n laparams=laparams)\n\n # Create a PDF interpreter object\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n\n # Process each page contained in the document\n text = ''\n for page in PDFPage.create_pages(document):\n interpreter.process_page(page)\n text = retstr.getvalue()\n\n vol = get_vol(text)\n no = get_no(text)\n return vol, no", "def ocr_core(self, filename):\n pytesseract.pytesseract.tesseract_cmd = PATH_INSTALLAZIONE_TESSERACT\n try:\n # We'll use Pillow's Image class to open the image and pytesseract to detect the string in the image\n text = pytesseract.image_to_string(Image.open(filename))\n except Exception as e:\n print(\"Problema con il riconoscimento di una delle immagini!\")\n print(e)\n text = ''\n print(text)\n return text", "def get(self, docid):\n file = os.path.join(self.dirname, docid)\n with open(file,'r',encoding='utf-8') as f:\n text = f.read()\n return text", "def get_plain_text(self):\n raise NotImplementedError(\"get_plain_text is not implemented\")", "def get_text_lines(instText):\n\n # Find out which part this is\n part = instText.part\n # Get the necessary parameters: lng, ext, dir\n sLng = part.corpus.get_lng_display()\n sDir = part.dir\n sName = instText.fileName\n sFormat = instText.get_format_display()\n # Now try to get the information\n oBack = get_crpp_text(sLng, sDir, sFormat, sName)\n # Prepare what we return\n if oBack == None or oBack['status'] == 'error':\n return None\n else:\n return oBack", "def extract_table(path):\n re_ex = RE_EX\n pages = []\n page_num = 1\n with open(path, 'rb') as in_file:\n parser = PDFParser(in_file)\n doc = PDFDocument(parser)\n for page in PDFPage.create_pages(doc):\n rsrcmgr = PDFResourceManager()\n output_string = StringIO()\n device = TextConverter(rsrcmgr, output_string, laparams=LAParams())\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n interpreter.process_page(page)\n finder = re.search(re_ex, output_string.getvalue(), re.IGNORECASE)\n print('Searching table', '\\tCurrent page:', page_num)\n if finder:\n print('Table finded.')\n pages.append(page_num)\n break\n\n page_num += 1\n\n table = extract_text(path, pages)\n table = isolate(table)\n table = add_separations(table)\n\n return table", "def clean_pdf_page(page): # Cleans a pdftotext page\n return [re.sub(\"\\s+\", \" \", i.strip()) for i in page.split(\"\\n\")]", "def page_text(tree, level=\"region\", index=\"0\"):\n\n nsmap = {'page': 
page_namespace(tree)}\n\n def region_text(region, index=\"0\"):\n try:\n reg = region.find('./page:TextEquiv/page:Unicode', namespaces=nsmap)\n if reg is not None and reg.getparent().attrib.get('index','-1') in ['-1', index]:\n return region.find('./page:TextEquiv/page:Unicode', namespaces=nsmap).text\n else:\n return None\n except AttributeError:\n return None\n\n def line_text(region, index=\"0\"):\n try:\n return \"\\n\".join([line.text for line in (region.findall('./page:TextLine/page:TextEquiv/page:Unicode', namespaces=nsmap))\n if line is not None and line.getparent().attrib.get('index','-1') in ['-1', index]])\n except AttributeError:\n return None\n\n def word_text(region, index=\"0\"):\n try:\n text = []\n textlines = []\n lines = region.findall('./page:TextLine/', namespaces=nsmap)\n for line in lines:\n words = line.findall('./page:TextEquiv/page:Unicode', namespaces=nsmap)\n if words:\n for word in words:\n if word.getparent().attrib.get('index','-1') in ['-1', index]:\n text.append(word.text)\n elif text != []:\n textlines.append(\" \".join(text))\n text = []\n return \"\\n\".join(textlines)\n except AttributeError:\n return None\n\n region_texts = []\n reading_order = tree.find('.//page:ReadingOrder', namespaces=nsmap)\n if reading_order is not None:\n for group in reading_order.iterfind('./*', namespaces=nsmap):\n if ET.QName(group.tag).localname == 'OrderedGroup':\n region_ref_indexeds = group.findall('./page:RegionRefIndexed', namespaces=nsmap)\n for region_ref_indexed in sorted(region_ref_indexeds, key=lambda r: int(r.attrib['index'])):\n region_id = region_ref_indexed.attrib['regionRef']\n region = tree.find('.//page:TextRegion[@id=\"%s\"]' % region_id, namespaces=nsmap)\n if region is not None and level == \"region\":\n region_texts.append(region_text(region, index))\n elif region is not None and level == \"line\":\n region_texts.append(line_text(region, index))\n elif region is not None and level == \"word\":\n region_texts.append(word_text(region, index))\n else:\n warn('Not a TextRegion: \"%s\"' % region_id)\n else:\n raise NotImplementedError\n else:\n for region in tree.iterfind('.//page:TextRegion', namespaces=nsmap):\n if region is not None and level == \"region\":\n region_texts.append(region_text(region, index))\n elif region is not None and level == \"line\":\n region_texts.append(line_text(region, index))\n elif region is not None and level == \"word\":\n region_texts.append(word_text(region, index))\n\n # XXX Does a file have to have regions etc.? 
region vs lines etc.\n # Filter empty region texts\n region_texts = (t for t in region_texts if t)\n\n text_ = '\\n'.join(region_texts)\n\n return text_", "def getPDF(self):\n return self.pdfSample", "def get_text(self):\n return self.text[:500]", "def graphtextdetextor(image_path):\n img=cv2.imread(image_path)\n\n #img=image_filter.rotate_anticlockwise(img)\n\n\n custom_config_number=r'--oem 3 --psm 6 outputbase digits'\n custom_config=r'--oem 3 --psm 6'\n\n custom_config1=r'--oem 3 --psm 1'\n\n custom_config2=r'--oem 3 --psm 4'\n\n text=pytesseract.image_to_string(img,config=custom_config)\n text2=pytesseract.image_to_string(img,config=custom_config1)\n text3=pytesseract.image_to_string(img,config=custom_config2)\n\n\n\n d=pytesseract.image_to_data(img,config=custom_config,output_type=Output.DICT)\n\n #print(text3)\n return [text,text2,text3]", "def extract_page_text(self, bs_object):\n\n # kill all script and style elements\n for script in bs_object([\"script\", \"style\", \"head\"]):\n script.extract() # rip it out\n\n # get text\n text = bs_object.get_text()\n\n # break into lines and remove leading and trailing space on each\n lines = (line.strip() for line in text.splitlines())\n # break multi-headlines into a line each\n chunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n # drop blank lines\n text_list_gen = (chunk for chunk in chunks if chunk)\n text_list = list(text_list_gen)\n # print \"TEXT LIST >>>\\n\", text_list\n \n return text_list", "def convertPDF(pdf_path, codec='ascii'):\n \n if pdf_path[:4] == 'http':\n print 'first downloading %s ...' % (pdf_path,)\n urllib.urlretrieve(pdf_path, 'temp.pdf')\n pdf_path = 'temp.pdf'\n \n rsrcmgr = PDFResourceManager()\n retstr = StringIO()\n laparams = LAParams()\n device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)\n \n fp = file(pdf_path, 'rb')\n process_pdf(rsrcmgr, device, fp)\n fp.close()\n device.close()\n \n str = retstr.getvalue()\n retstr.close()\n \n return str", "def extract_scanned(file_path):\n total_text = \"\"\n output_folder = os.path.join(cf.data_path, \"images\")\n try:\n images = convert_from_path(file_path, dpi=300, output_folder=output_folder, first_page=1, last_page=None, fmt='jpg',\n thread_count=1, userpw=None)\n image_name = images[0].filename\n for page_num in range(len(images)):\n image = cv2.imread(images[page_num].filename)\n text = pytesseract.image_to_string(image, lang=\"eng\", config='--psm 6')\n total_text += text\n print(\"\\n number of pages extracted : \", len(images))\n except Exception as e:\n print(\"\\n Error in convert2image : \", traceback.format_exc(()))\n return total_text", "def _pdf(self):\n # LOG: processing_type property\n self.set_property('processing_type', 'pdf')\n xmlDoc = PDFiD(self.src_path)\n oPDFiD = cPDFiD(xmlDoc, True)\n # TODO: are there other characteristics which should be dangerous?\n if oPDFiD.encrypt.count > 0:\n self.make_dangerous('encrypted pdf')\n if oPDFiD.js.count > 0 or oPDFiD.javascript.count > 0:\n self.make_dangerous('pdf with javascript')\n if oPDFiD.aa.count > 0 or oPDFiD.openaction.count > 0:\n self.make_dangerous('openaction')\n if oPDFiD.richmedia.count > 0:\n self.make_dangerous('flash')\n if oPDFiD.launch.count > 0:\n self.make_dangerous('launch')", "def detect_text(self):\n client = vision.ImageAnnotatorClient()\n # with open(self.path, 'rb') as image_file:\n # content = image_file.read()\n content=self.image_bytes\n image = vision.types.Image(content=content)\n response = client.text_detection(image=image)\n 
texts = response.text_annotations\n bubble_text= texts[0].description\n bubble_text=str(bubble_text).strip()\n cleaned_bubble_text = preprocess_bubble_text(bubble_text)\n if len(cleaned_bubble_text)<5:\n bubble_text=None\n if response.error.message:\n raise Exception(\n '{}\\nFor more info on error messages, check: '\n 'https://cloud.google.com/apis/design/errors'.format(\n response.error.message))\n return bubble_text" ]
[ "0.77171206", "0.75832546", "0.7520995", "0.7419466", "0.7063826", "0.7058838", "0.7054497", "0.7044647", "0.6961371", "0.681999", "0.6804874", "0.67496186", "0.6660395", "0.6636881", "0.66142976", "0.6598252", "0.65733755", "0.64733654", "0.64633906", "0.645397", "0.64384454", "0.6425408", "0.6410385", "0.63699144", "0.6325862", "0.6323204", "0.6259165", "0.622043", "0.6219724", "0.6217804", "0.6169586", "0.61532897", "0.61195904", "0.6117244", "0.61073554", "0.6051322", "0.60468376", "0.6025157", "0.5994801", "0.5986311", "0.5985036", "0.59712875", "0.5937868", "0.5937263", "0.5920236", "0.5895462", "0.5884233", "0.5863683", "0.5822725", "0.5817397", "0.5813585", "0.5789938", "0.57883835", "0.5782538", "0.5762816", "0.5759662", "0.5721933", "0.57026094", "0.570064", "0.5694164", "0.56896335", "0.56888545", "0.5682643", "0.5681837", "0.56691194", "0.56691194", "0.56691194", "0.56691194", "0.56691194", "0.5647803", "0.56333107", "0.5628327", "0.5611652", "0.5607002", "0.5594152", "0.55911124", "0.5585331", "0.55849695", "0.5576148", "0.55695003", "0.5559404", "0.55506253", "0.55505544", "0.55442446", "0.55423963", "0.5537256", "0.55194294", "0.550994", "0.5488121", "0.54710996", "0.54707706", "0.5461263", "0.54553735", "0.5445587", "0.54413736", "0.54385185", "0.54314125", "0.5426286", "0.54218847", "0.54192597" ]
0.6915857
9
A bad way to add row separations
def add_separations(pages, space_tolerance=3):
    return re.search(r' *ID +([\d|\.]+) *', pages[0]).groups()[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_row(row,i):\n # convert string\n char_array = np.array(list(row))\n\n #insert entry dividers, then split by them\n div_ix = (\n np.array([6, 34, 48, 51, 54, 60, 64, 67, 72, 80, 86, 94, 100,\n 107, 112, 119, 125, 137, 141, 145, 156]),\n )\n char_array[div_ix] = ','\n new_csv_row = (''.join(char_array)).split(',')\n\n # remove excess whitespace surrounding data\n new_csv_row = np.array([entry.strip() for entry in new_csv_row])\n\n return new_csv_row", "def _gen_pre_lines(self):\n for row in self.row_major_matrix:\n arr = []\n css_classes = [self.legend.value_to_css_class(v) for v in row]\n for css_class, count in iterutils.rle(css_classes):\n arr.append('<span class=\"%s\">' % css_class)\n arr.append('&nbsp;' * count)\n arr.append('</span>')\n yield ''.join(arr)", "def _get_separator_count(self):\n\n return 2", "def newRowsOnSeparator(self):\n\n # Prompts user for column and separator\n column, separator = Model.NewRowsOnSeparatorDialogBox.getResults(self.getCurrentPanda(), self)\n\n # Pass to panda\n self.getCurrentPanda().newRowsOnSeparator(column, separator)", "def separator(self):\n pass", "def _parse_row(row: str):\n final_row = []\n for char in row:\n\n # any number N expands into N spaces\n if char in \"12345678\":\n for i in range(int(char)):\n final_row.append(EMPTY_SPACE)\n else:\n final_row.append(char)\n\n return final_row", "def rebuild_row(lst, is_collocation):\n split_list = lst[0].split(\"\\t\")\n if is_collocation:\n return [split_list[0] + \" \" + split_list[1], \"1\"]\n return [split_list[0] + \" \" + split_list[1], \"0\"]", "def format_row(self, row):\n raise NotImplementedError()", "def generate_grid_separator_row(width: int) -> str:\r\n row = \"\"\r\n\r\n for _ in range(width):\r\n row += CORNER + (3 * HORIZONTAL_WALL)\r\n\r\n row += CORNER\r\n return row", "def add_row_skips(vals):\n skips = {}\n # Find all skips requried for each rows\n for k, v in vals.iteritems():\n if not k or '\\\\skiprow' not in k:\n continue # No skip for this field\n row = 0\n for value in v:\n num_skip = isinstance(value, basestring) and \\\n value.count('\\\\skiprow') or 0\n if skips.get(row, 0) < num_skip:\n skips.update({row: num_skip})\n row += 1\n if not skips:\n return vals\n # Add skips for all other fields\n for k, v in vals.iteritems():\n row = 0\n for value in v:\n num_skip = isinstance(value, basestring) and \\\n value.count('\\\\skiprow') or 0\n if skips.get(row, 0) > num_skip:\n value = str(value)\n for x in range(skips[row] - num_skip):\n value += '\\\\skiprow'\n v[row] = value\n row += 1\n return vals", "def newrow(self):\n maxlen = 0\n for colbuf in self.colbufs:\n maxlen = max(maxlen, len(colbuf))\n\n for i in range(maxlen):\n first = True\n for colbuf in self.colbufs:\n if first:\n first = False\n else:\n sys.stdout.write(self.sepstr)\n if i < len(colbuf):\n sys.stdout.write(colbuf[i])\n else:\n sys.stdout.write(\" \"*self.colwidth)\n sys.stdout.write(\"\\n\")\n\n self.colbufs = []\n for i in range(self.ncolumns):\n self.colbufs.append([])", "def require_separator(self):\n return False", "def tab_delim_table(self):\n self.generate()\n\n header = ' \\t '.join([r'{: ^7}'.format(col) for col in self.columns])\n lines = []\n for row in self.rows:\n bits = []\n for col in self.columns:\n if col in self.formatters:\n bits.append(self.formatters[col].format(row[col]))\n else:\n bits.append(self.formatters.get(col, '{: ^7}').format(row[col] if row[col] else ''))\n lines.append(' \\t '.join(bits))\n\n return \"{}\\n{}\".format(header, '\\n'.join(lines))", "def 
_newLine(self, usePos = True):", "def rows(file, prep=None,\n whitespace='[\\n\\r\\t]',\n comments='#.*',\n sep=\",\"\n ):\n doomed = re.compile('(' + whitespace + '|' + comments + ')')\n with open(file) as fs:\n for line in fs:\n line = re.sub(doomed, \"\", line)\n if line:\n row = map(lambda z: z.strip(), line.split(sep))\n if len(row) > 0:\n yield prep(row) if prep else row", "def small_preprocess_singlerow(data):\r\n # Remove new line characters\r\n data = re.sub('\\s+', ' ', data) \r\n # Remove distracting single quotes\r\n data = re.sub(\"\\'\", \"\", data)\r\n\r\n return data", "def _createrow(self, columns):\r\n # Each cell is a column padded to len(self)\r\n cells = []\r\n \r\n for col in columns:\r\n # left-justify and pad to len(self)\r\n cells.append(col.ljust(len(self)))\r\n \r\n # Join all the cells into one row str, with ' | ' as the separator\r\n return ' | '.join(cells)", "def format_rows(rows, newarray):\n for row in range(rows):\n line = 0\n letters = [\"\", \"A. \", \"B. \", \"C. \", \"D. \", \"\"]\n for letter in letters:\n dummystring = letter + newarray[row, line]\n newarray[row, line] = dummystring\n line +=1\n print(newarray[row])\n return newarray", "def showSeparator():\n\treturn (1, 0)", "def split_rows(l):\n row0 = [l[0], l[3], l[7]]\n row1 = [l[1], l[4], l[8], l[12]]\n row2 = [l[2], l[5], l[9], l[13], l[16]]\n row3 = [l[6], l[10], l[14], l[17]]\n row4 = [l[11], l[15], l[18]]\n return [row0, row1, row2, row3, row4]", "def _tabulate_data(\n self, headers, tabular_data, column_spacing=2, divider='-'\n ):\n max_lengths = [len(str(header)) for header in headers]\n for data_row in tabular_data:\n for column_index, item in enumerate(data_row):\n item = str(item).replace(self.color_package, '')\n item = str(item).replace(self.color_foreground, '')\n if len(str(item)) > max_lengths[column_index]:\n max_lengths[column_index] = len(str(item))\n\n dividers = [divider * length for length in max_lengths]\n\n def tabulate_row(items):\n row = ''\n item_template = '{item}{spacing}'\n for i, row_item in enumerate(items):\n\n # clear colors before calculating\n colorless_item = (\n str(row_item).replace(self.color_package, '')\n )\n colorless_item = colorless_item.replace(\n self.color_foreground, '')\n\n item_spacing = ' ' * (\n max_lengths[i] +\n column_spacing -\n len(str(colorless_item))\n )\n row += item_template.format(\n item=row_item, spacing=item_spacing)\n return row.strip() + '\\n'\n\n result = tabulate_row(items=headers)\n result += tabulate_row(items=dividers)\n for data_row in tabular_data:\n result += tabulate_row(items=data_row)\n\n return result.rstrip()", "def createRow(self):\n row = []\n for col in range(self.width):\n row += ['']\n return row", "def addSeparatorFeature(self):\n \n # graphical separators\n dNS = {\"pc\":PageXml.NS_PAGE_XML}\n someNode = self.lNode[0]\n ndPage = someNode.node.xpath(\"ancestor::pc:Page\", namespaces=dNS)[0]\n lNdSep = ndPage.xpath(\".//pc:SeparatorRegion\", namespaces=dNS)\n loSep = [ShapeLoader.node_to_LineString(_nd) for _nd in lNdSep]\n \n if self.bVerbose: traceln(\" %d graphical separators\"%len(loSep))\n\n # make an indexed rtree\n idx = index.Index()\n for i, oSep in enumerate(loSep):\n idx.insert(i, oSep.bounds)\n \n # take each edge in turn and list the separators it crosses\n nCrossing = 0\n for edge in self.lEdge:\n # bottom-left corner to bottom-left corner\n oEdge = geom.LineString([(edge.A.x1, edge.A.y1), (edge.B.x1, edge.B.y1)])\n prepO = prep(oEdge)\n lCrossingPoints = []\n fSepTotalLen = 0\n for i in 
idx.intersection(oEdge.bounds):\n # check each candidate in turn\n oSep = loSep[i]\n if prepO.intersects(oSep):\n fSepTotalLen += oSep.length\n oPt = oEdge.intersection(oSep)\n if type(oPt) != geom.Point:\n traceln('Intersection in not a point: skipping it')\n else:\n lCrossingPoints.append(oPt)\n \n if lCrossingPoints:\n nCrossing += 1\n edge.bCrossingSep = True\n edge.sep_NbCrossing = len(lCrossingPoints)\n minx, miny, maxx, maxy = geom.MultiPoint(lCrossingPoints).bounds\n edge.sep_SpanLen = abs(minx-maxx) + abs(miny-maxy)\n edge.sep_AvgSpanSgmt = edge.sep_SpanLen / len(lCrossingPoints) \n edge.sep_AvgSepLen = fSepTotalLen / len(lCrossingPoints)\n else:\n edge.bCrossingSep = False\n edge.sep_NbCrossing = 0\n edge.sep_SpanLen = 0\n edge.sep_AvgSpanSgmt = 0 \n edge.sep_AvgSepLen = 0\n \n #traceln((edge.A.domid, edge.B.domid, edge.bCrossingSep, edge.sep_NbCrossing, edge.sep_SpanLen, edge.sep_AvgSpanSgmt, edge.sep_AvgSepLen))\n \n \n if self.bVerbose: \n traceln(\" %d (/ %d) edges crossing at least one graphical separator\"%(nCrossing, len(self.lEdge)))", "def record_row_delimiter(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"record_row_delimiter\")", "def record_row_delimiter(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"record_row_delimiter\")", "def _tab_newline_replace( self, fromlines, tolines ):\n\t\tdef expand_tabs( line ):\n\t\t\t# hide real spaces\n\t\t\tline = line.replace( ' ', '\\0' )\n\t\t\t# expand tabs into spaces\n\t\t\tline = line.expandtabs( self._tabsize )\n\t\t\t# relace spaces from expanded tabs back into tab characters\n\t\t\t# (we'll replace them with markup after we do differencing)\n\t\t\tline = line.replace( ' ', '\\t' )\n\t\t\treturn line.replace( '\\0', ' ' ).rstrip( '\\n' )\n\t\tfromlines = [expand_tabs( line ) for line in fromlines]\n\t\ttolines = [expand_tabs( line ) for line in tolines]\n\t\treturn fromlines, tolines", "def processRow(self, row):\n\t\tif self.delim is not None:\n\t\t\trowArr = row.split(self.delim)\n\t\t\tmsg = \"row does not have expected number of columns found \" + str(len(rowArr)) + \" expected \" + str(self.rowSize)\n\t\t\tassert len(rowArr) == self.rowSize, msg\n\t\telse:\n\t\t\trowArr = row\n\t\t\t\n\t\tnewRowArr = []\n\t\tfor i in range(len(rowArr)):\n\t\t\tcurVal = rowArr[i]\n\t\t\tif (i in self.catValues):\n\t\t\t\tvalues = self.catValues[i]\n\t\t\t\tfor val in values:\n\t\t\t\t\tif val == curVal:\n\t\t\t\t\t\tnewVal = self.trueVal\n\t\t\t\t\telse:\n\t\t\t\t\t\tnewVal = self.falseVal\n\t\t\t\t\tnewRowArr.append(newVal)\n\t\t\telse:\n\t\t\t\tnewRowArr.append(curVal)\n\t\tassert len(newRowArr) == self.newRowSize, \"invalid new row size \" + str(len(newRowArr)) + \" expected \" + str(self.newRowSize)\n\t\tencRow = self.delim.join(newRowArr) if self.delim is not None else newRowArr\n\t\treturn encRow", "def _gen_table_rows(self):\n for row in self.row_major_matrix:\n arr = []\n for value in row:\n css_class = self.legend.value_to_css_class(value)\n arr.append('<td class=\"%s\">&nbsp;</td>' % css_class)\n yield ''.join(arr)", "def insertLines(data):\n data = pd.DataFrame(data)\n for _,row in data.iterrows():\n insertLine(row)", "def test_transpose_lines(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n line 1\n first line\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.2\", \"2.2\"),\n after_sel=(\"2.10\", \"2.10\"),\n command_name=\"transpose-lines\",\n )", "def 
_pretty_table_line(self, items):\n padded_strings = []\n for i, s in enumerate(items):\n padding_value = self._padding_values[i]\n padded_strings.append('{:<{}s}'.format(str(s), padding_value))\n return \" \" + \"| \".join(padded_strings)", "def fake_clean_row(row):\n\treturn row", "def test_blank_last_line():\n column_1 = Column(\"Col 1\", width=10)\n tc = TableCreator([column_1])\n\n row_data = ['my line\\n\\n']\n row = tc.generate_row(row_data=row_data, is_header=False)\n assert row == ('my line \\n'\n ' ')\n\n row_data = ['\\n']\n row = tc.generate_row(row_data=row_data, is_header=False)\n assert row == ' '\n\n row_data = ['']\n row = tc.generate_row(row_data=row_data, is_header=False)\n assert row == ' '", "def table_row(self, content):\n return ['<tr>\\n%s</tr>\\n'] + content", "def test_init_with_field_dict_and_custom_field_separator(self):\n fields = {\n 'Column 1': 'a=${aaa}',\n 'Column 2': 'b=${bbb}',\n 'Column 3': 'c=${ccc}',\n }\n csv_formatter = CSVFormatter(fields=fields, sep=\" || \")\n csv = csv_formatter.format_records(self.records)\n\n csv_expected = textwrap.dedent(\"\"\"\\\n #Column 1 || Column 2 || Column 3\n a=foobar_01 || b=8 || c=4898FE19\n a=foobar_02 || b=160 || c=5825D187\n a=foobar_03 || b=99 || c=3648A436\n \"\"\")\n\n assert csv == csv_expected", "def format_line_lean(self):\n\t\tline = \"<tr>\\\n\t\t\t\t\t<td>\\\n\t\t\t\t\t\t<font size='2'>\" + self.tag + \"</font>\\\n\t\t\t\t\t</td>\\\n\t\t\t\t\t<td>\\\n\t\t\t\t\t\t<font size='2'>\" + self.value + \"</font>\\\n\t\t\t\t\t</td>\\\n\t\t\t\t</tr>\"\n\t\treturn line", "def test_doubled_quotes_segv():\n tbl = dedent(\n \"\"\"\n \"ID\",\"TIMESTAMP\",\"addendum_id\",\"bib_reference\",\"bib_reference_url\",\"client_application\",\"client_category\",\"client_sort_key\",\"color\",\"coordsys\",\"creator\",\"creator_did\",\"data_pixel_bitpix\",\"dataproduct_subtype\",\"dataproduct_type\",\"em_max\",\"em_min\",\"format\",\"hips_builder\",\"hips_copyright\",\"hips_creation_date\",\"hips_creation_date_1\",\"hips_creator\",\"hips_data_range\",\"hips_estsize\",\"hips_frame\",\"hips_glu_tag\",\"hips_hierarchy\",\"hips_initial_dec\",\"hips_initial_fov\",\"hips_initial_ra\",\"hips_lon_asc\",\"hips_master_url\",\"hips_order\",\"hips_order_1\",\"hips_order_4\",\"hips_order_min\",\"hips_overlay\",\"hips_pixel_bitpix\",\"hips_pixel_cut\",\"hips_pixel_scale\",\"hips_progenitor_url\",\"hips_publisher\",\"hips_release_date\",\"hips_release_date_1\",\"hips_rgb_blue\",\"hips_rgb_green\",\"hips_rgb_red\",\"hips_sampling\",\"hips_service_url\",\"hips_service_url_1\",\"hips_service_url_2\",\"hips_service_url_3\",\"hips_service_url_4\",\"hips_service_url_5\",\"hips_service_url_6\",\"hips_service_url_7\",\"hips_service_url_8\",\"hips_skyval\",\"hips_skyval_method\",\"hips_skyval_value\",\"hips_status\",\"hips_status_1\",\"hips_status_2\",\"hips_status_3\",\"hips_status_4\",\"hips_status_5\",\"hips_status_6\",\"hips_status_7\",\"hips_status_8\",\"hips_tile_format\",\"hips_tile_format_1\",\"hips_tile_format_4\",\"hips_tile_width\",\"hips_version\",\"hipsgen_date\",\"hipsgen_date_1\",\"hipsgen_date_10\",\"hipsgen_date_11\",\"hipsgen_date_12\",\"hipsgen_date_2\",\"hipsgen_date_3\",\"hipsgen_date_4\",\"hipsgen_date_5\",\"hipsgen_date_6\",\"hipsgen_date_7\",\"hipsgen_date_8\",\"hipsgen_date_9\",\"hipsgen_params\",\"hipsgen_params_1\",\"hipsgen_params_10\",\"hipsgen_params_11\",\"hipsgen_params_12\",\"hipsgen_params_2\",\"hipsgen_params_3\",\"hipsgen_params_4\",\"hipsgen_params_5\",\"hipsgen_params_6\",\"hipsgen_params_7\",\"hipsgen_params_8\",\"hips
gen_params_9\",\"label\",\"maxOrder\",\"moc_access_url\",\"moc_order\",\"moc_release_date\",\"moc_sky_fraction\",\"obs_ack\",\"obs_collection\",\"obs_copyrigh_url\",\"obs_copyright\",\"obs_copyright_1\",\"obs_copyright_url\",\"obs_copyright_url_1\",\"obs_description\",\"obs_description_url\",\"obs_descrition_url\",\"obs_id\",\"obs_initial_dec\",\"obs_initial_fov\",\"obs_initial_ra\",\"obs_provenance\",\"obs_regime\",\"obs_title\",\"ohips_frame\",\"pixelCut\",\"pixelRange\",\"prov_did\",\"prov_progenitor\",\"prov_progenitor_url\",\"publisher_did\",\"publisher_id\",\"s_pixel_scale\",\"t_max\",\"t_min\"\n \"CDS/P/2MASS/H\",\"1524123841000\",\"\",\"2006AJ....131.1163S\",\"http://cdsbib.unistra.fr/cgi-bin/cdsbib?2006AJ....131.1163S\",\"AladinDesktop\",\"Image/Infrared/2MASS\",\"04-001-03\",\"\",\"\",\"\",\"ivo://CDS/P/2MASS/H\",\"\",\"\",\"image\",\"1.798E-6\",\"1.525E-6\",\"\",\"Aladin/HipsGen v9.017\",\"CNRS/Unistra\",\"2013-05-06T20:36Z\",\"\",\"CDS (A.Oberto)\",\"\",\"\",\"equatorial\",\"\",\"mean\",\"\",\"\",\"\",\"\",\"\",\"9\",\"\",\"\",\"\",\"\",\"\",\"0 60\",\"2.236E-4\",\"\",\"\",\"2016-04-22T13:48Z\",\"\",\"\",\"\",\"\",\"\",\"http://alasky.unistra.fr/2MASS/H\",\"https://irsa.ipac.caltech.edu/data/hips/CDS/2MASS/H\",\"http://alaskybis.unistra.fr/2MASS/H\",\"https://alaskybis.unistra.fr/2MASS/H\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"public master clonableOnce\",\"public mirror unclonable\",\"public mirror clonableOnce\",\"public mirror clonableOnce\",\"\",\"\",\"\",\"\",\"\",\"jpeg fits\",\"\",\"\",\"512\",\"1.31\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"http://alasky.unistra.fr/2MASS/H/Moc.fits\",\"9\",\"\",\"1\",\"University of Massachusetts & IPAC/Caltech\",\"The Two Micron All Sky Survey - H band (2MASS H)\",\"\",\"University of Massachusetts & IPAC/Caltech\",\"\",\"http://www.ipac.caltech.edu/2mass/\",\"\",\"2MASS has uniformly scanned the entire sky in three near-infrared bands to detect and characterize point sources brighter than about 1 mJy in each band, with signal-to-noise ratio (SNR) greater than 10, using a pixel size of 2.0\"\". This has achieved an 80,000-fold improvement in sensitivity relative to earlier surveys. 2MASS used two highly-automated 1.3-m telescopes, one at Mt. Hopkins, AZ, and one at CTIO, Chile. Each telescope was equipped with a three-channel camera, each channel consisting of a 256x256 array of HgCdTe detectors, capable of observing the sky simultaneously at J (1.25 microns), H (1.65 microns), and Ks (2.17 microns). The University of Massachusetts (UMass) was responsible for the overall management of the project, and for developing the infrared cameras and on-site computing systems at both facilities. The Infrared Processing and Analysis Center (IPAC) is responsible for all data processing through the Production Pipeline, and construction and distribution of the data products. 
Funding is provided primarily by NASA and the NSF\",\"\",\"\",\"\",\"+0\",\"0.11451621372724685\",\"0\",\"\",\"Infrared\",\"2MASS H (1.66um)\",\"\",\"\",\"\",\"\",\"IPAC/NASA\",\"\",\"\",\"\",\"\",\"51941\",\"50600\"\n \"\"\"\n )\n ascii.read(tbl, format=\"csv\", fast_reader=True, guess=False)", "def get_rowkeys(\n table_instance, rowkeys: List[str], sep: str = \"#\",\n) -> List[models.RowModelOdd]:\n row_model = [_get_single_row(table_instance, rowkey, sep=\":\") for rowkey in rowkeys]\n\n return row_model", "def test_get_separator_csv():\n # GIVEN a line with commas as delimiter\n line = \"one,two,three\"\n # WHEN getting the separator\n sep = samplesheet.get_separator(line)\n # THEN assert comma is returned\n assert sep == \",\"", "def gen_separator_thunk(self):\n separator_thunk = self.gen_new_thunk(0)\n return separator_thunk", "def test_get_separator_tab():\n # GIVEN a line with commas as delimiter\n line = \"one\\ttwo\\tthree\"\n # WHEN getting the separator\n sep = samplesheet.get_separator(line)\n # THEN assert None is returned\n assert sep is None", "def vertical_table(\n data, headers, sep_title=\"{n}. row\", sep_character=\"*\", sep_length=27\n):\n header_len = max([len(x) for x in headers])\n padded_headers = [x.ljust(header_len) for x in headers]\n formatted_rows = [_format_row(padded_headers, row) for row in data]\n\n output = []\n for i, result in enumerate(formatted_rows):\n yield _get_separator(i, sep_title, sep_character, sep_length) + result", "def clean_rows(reader):\n return [[a.strip() for a in row] for row in reader if row]", "def recordDelimiterChoice(self):\n# Thanks to https://stackoverflow.com/questions/610883\n grid = self.ids.delimiterGrid\n for x in grid.children:\n try:\n if x.active:\n self.delim = x.name\n except AttributeError:\n pass\n # This function cleans the data and puts it back in the same file\n# self.plotter.normalizeCSV(self.filename, self.delim)\n self.headers = self.plotter.get_headers(self.filename, self.delim)\n # Dynamically construct the screen for axis selection\n self.header_choices('x')", "def triangle(row):\n if len(row) == 0:\n raise ValidationError(\"Row empty\")\n if len(row) == 1:\n return row\n if len(row) < 8:\n return small_triangles(row)\n make_steps()\n while len(row) > 50:\n streams = defaultdict(list)\n j = 0\n streams[j] = list(row)\n for i in range(len(row) - 4):\n for j in range(10):\n step = j * 4\n if i >= step:\n streams[j + 1].append(do_row(streams[j], i - step))\n row = streams[j]\n return small_triangles(row)", "def test_lstrip_whitespace(parallel, read_basic):\n text = \"\"\"\n 1, 2, \\t3\n A,\\t\\t B, C\n a, b, c\n \\n\"\"\"\n\n table = read_basic(text, delimiter=\",\", parallel=parallel)\n expected = Table([[\"A\", \"a\"], [\"B\", \"b\"], [\"C\", \"c\"]], names=(\"1\", \"2\", \"3\"))\n assert_table_equal(table, expected)", "def _format_column(self, row_data):\n return [[row[i] for row in row_data] for i in range(self.row_length)]", "def construct_rowbase(self):\n \n # build out the format string to enter the strings into\n row_bse = [r\"{}{{: >{}}}{}\".format(\n \" \"*self.left_pad,\n str(self.col_size[c]),\n \" \"*self.right_pad) for c in range(self.ncols+self.col_start)]\n return row_bse", "def _split_lines(self, lines, separator_marker):\n result = []\n current_group = []\n for line in lines:\n if re.match(rf'[^\\S\\n]*{separator_marker}\\w+(\\(.*\\))?:', line):\n if current_group:\n result.append(current_group)\n current_group = []\n current_group.append(line)\n if current_group:\n 
result.append(current_group)\n return result", "def tabing_tool(code):\n for i, line in enumerate(code):\n code[i] = ' '*4 + line\n return code", "def wrap_row(row, col_widths, hyphen_break=False, field_sep=' | ',\n break_chrs=''):\n assert len(row) == len(col_widths)\n # Broken fields (padded)\n broken_fields = [\n wrap(field, width, hyphen_break=hyphen_break, break_chrs=break_chrs)\n for field, width in zip(row, col_widths)\n ]\n # Transpose & join each broken line\n return '\\n'.join(\n field_sep.join(line) for line in zip_longest_strings(broken_fields)\n )", "def row(self,pre=\"\",indent=0,justname=25,justtitle=25,merged=True,split=True,colpass=False):\n xsec = \"%.2f\"%self.xsec if self.xsec>0 else \"\"\n nevts = \"%.1f\"%self.nevents if self.nevents>=0 else \"\"\n sumw = \"%.1f\"%self.sumweights if self.sumweights>=0 else \"\"\n norm = \"%.4f\"%self.norm\n split_ = split and self.splitsamples\n name = self.name.ljust(justname-indent)\n title = self.title.ljust(justtitle)\n if merged:\n string = \">>> %s%s %s %10s %12s %17s %9s %s\" %\\\n (pre,name,title,xsec,nevts,sumw,norm,self.extraweight)\n for i, sample in enumerate(self.samples):\n islast = i+1>=len(self.samples)\n if \"├─ \" in pre or \"└─ \" in pre or indent==0:\n pline = color(\"│ \") if colpass else \"│ \" # line passing merged samples\n subpre = pre.replace(\"├─ \",pline)\n else:\n subpre = pre+' '*3\n subpre += \"└─ \" if (islast and not split_) else \"├─ \"\n colpass = split_ and islast\n string += \"\\n\" + sample.row(pre=subpre,indent=indent+3,justname=justname,justtitle=justtitle,split=split,colpass=colpass)\n else:\n string = \">>> %s%s %s\"%(pre,name,title)\n if split_:\n string += self.splitrows(indent=indent,justname=justname,justtitle=justtitle)\n return string", "def generate_table(self, rows):\n ...", "def _separate_columns(self, length):\n columns = ['']*length\n for signal in self.data:\n for i, char in enumerate(signal):\n columns[i] += char\n return columns", "def format_row(values, num_decimal=3):\n new_vals = []\n for val in values:\n if np.isnan(val):\n new_val = NA_REP\n elif isinstance(val, numbers.Number):\n new_val = text_util.format_num(val, num_decimal=num_decimal)\n else:\n new_val = val\n new_vals.append(new_val)\n\n return new_vals", "def test_get_separator_space():\n # GIVEN a line with spaces\n line = \"one two three\"\n # WHEN getting the separator\n sep = samplesheet.get_separator(line)\n # THEN assert space is returned\n assert sep == \" \"", "def train_transpose(string):\r\n \r\n data = []\r\n linedata = []\r\n worddata = []\r\n for letter in string:\r\n if letter == \"\\n\":\r\n linedata.append(worddata)\r\n data.append(linedata)\r\n linedata = []\r\n worddata = []\r\n elif letter == \" \" or letter == \":\":\r\n linedata.append(worddata)\r\n worddata = []\r\n else:\r\n worddata.append(letter)\r\n linedata.append(worddata)\r\n data.append(linedata)\r\n return data", "def _layout_as_winter_columns(self) -> None:\r\n self.sep_vertical = '❄☂🌧☂❄'\r\n self.sep_cross = '❄☂🌧☂❄'\r\n self.sep_horizontal = 'ˣ'", "def addSplit(self):\n pass", "def renderGridRow(self,rowIndex,whitespaceSet=[]):\n rowstr = \"\"\n internalRowIndex = self.minRow + rowIndex\n for c in range(self.minCol,self.maxCol,1):\n gridval = self.grid.get(self.createKey(c,internalRowIndex),self.defaultVal)\n if gridval not in whitespaceSet:\n rowstr += str(gridval)\n else:\n rowstr += \" \"\n return rowstr", "def _split_line( self, data_list, line_num, text ):\n\t\t# if blank line or context separator, just add it to the output 
list\n\t\tif not line_num:\n\t\t\tdata_list.append( ( line_num, text ) )\n\t\t\treturn\n\n\t\t# if line text doesn't need wrapping, just add it to the output list\n\t\tsize = len( text )\n\t\tmax_len = self._wrapcolumn\n\t\tif ( size <= max_len ) or ( ( size - ( text.count( '\\0' ) * 3 ) ) <= max_len ):\n\t\t\tdata_list.append( ( line_num, text ) )\n\t\t\treturn\n\n\t\t# scan text looking for the wrap point, keeping track if the wrap\n\t\t# point is inside markers\n\t\ti = 0\n\t\tn = 0\n\t\tmark = ''\n\t\twhile n < max_len and i < size:\n\t\t\tif text[i] == '\\0':\n\t\t\t\ti += 1\n\t\t\t\tmark = text[i]\n\t\t\t\ti += 1\n\t\t\telif text[i] == '\\1':\n\t\t\t\ti += 1\n\t\t\t\tmark = ''\n\t\t\telse:\n\t\t\t\ti += 1\n\t\t\t\tn += 1\n\n\t\t# wrap point is inside text, break it up into separate lines\n\t\tline1 = text[:i]\n\t\tline2 = text[i:]\n\n\t\t# if wrap point is inside markers, place end marker at end of first\n\t\t# line and start marker at beginning of second line because each\n\t\t# line will have its own table tag markup around it.\n\t\tif mark:\n\t\t\tline1 += '\\1'\n\t\t\tline2 = '\\0' + mark + line2\n\n\t\t# tack on first line onto the output list\n\t\tdata_list.append( ( line_num, line1 ) )\n\n\t\t# use this routine again to wrap the remaining text\n\t\tself._split_line( data_list, '>', line2 )", "def test_nl_separated_values(self, test_input, expected, sc):\n assert sc.add(test_input) == expected", "def _transform_row_model(\n rowkey: str, row: dict, sep: str\n) -> models.RowModelOdd:\n keys = [\"sid\", \"lid\", \"mid\", \"mkt\", \"seq\", \"per\", \"vendor\", \"ts\"]\n info_list: list = [\"s\", \"per\", \"et\"]\n\n row_dict = dict(zip(keys, rowkey.split(sep)))\n row_dict[\"info\"] = {}\n row_dict[\"odds\"] = {}\n row_model = models.RowModelOdd(**row_dict)\n \n info_dict: dict = {}\n for col in info_list:\n col_name = \":\".join([\"info\", col])\n info_dict[col] = row[col_name.encode(\"utf-8\")]\n row_model.info = models.OddInfoModel(**info_dict)\n\n target_cols = _get_target_column_list(row_model.mkt)\n odds_dict: dict = {}\n for col in target_cols:\n col_name = \":\".join([\"odds\", col])\n odds_dict[col] = row[col_name.encode(\"utf-8\")]\n\n odd_model = None\n mkt: str = row_model.mkt\n if mkt.startswith(\"1x2\"):\n odd_model = models.ColumnModel1x2(**odds_dict)\n elif mkt.startswith(\"ah\"):\n odd_model = models.ColumnModelAH(**odds_dict)\n else:\n odd_model = models.ColumnModelOU(**odds_dict)\n\n row_model.odds = odd_model\n\n return row_model", "def _output_padding_line(self):\n for i in range(self.num_new_columns):\n self._write_column(self.new_columns[i], '|')\n self.buf += ' '\n\n self._pad_horizontally(self.num_new_columns * 2)", "def csv_delimiter_examples():\n number_table = read_csv_file(\"number_table.csv\", \" \")\n print_table(number_table)\n print()\n name_table = read_csv_file(\"name_table.csv\", \",\")\n print_table(name_table)", "def csv_delimiter_examples():\n number_table = read_csv_file(\"number_table.csv\", \" \")\n print_table(number_table)\n print()\n name_table = read_csv_file(\"name_table.csv\", \",\")\n print_table(name_table)", "def test_delimiter(parallel, read_basic):\n text = dedent(\n \"\"\"\n COL1 COL2 COL3\n 1 A -1\n 2 B -2\n \"\"\"\n )\n expected = Table([[1, 2], [\"A\", \"B\"], [-1, -2]], names=(\"COL1\", \"COL2\", \"COL3\"))\n\n for sep in \" ,\\t#;\":\n table = read_basic(text.replace(\" \", sep), delimiter=sep, parallel=parallel)\n assert_table_equal(table, expected)", "def tokenize(\n self, text_row: Optional[List[str]], token_row: 
Optional[List[List[str]]]\n ):\n raise NotImplementedError", "def print_row():\n print('| | |')", "def _gen_table_style_lines(self):\n yield '.heatmap {border: none; border-collapse: collapse; border-spacing: 0}'\n yield '.heatmap td {padding: 0; margin: 0; font-family: monospace;}'", "def makeRow(step,points) :\n edge_ = float((points-1.0)/2.0)\n row_ = []\n for j in range(points) :\n rowt = []\n for i in range(len(step)) :\n rowt.append(float(-edge_*step[i]+j*step[i]))\n row_.append(rowt)\n return row_", "def startTableRow(self):\r\n self.text += \"<tr>\"", "def test_get_separator_semi():\n # GIVEN a line with commas as delimiter\n line = \"one;two;three\"\n # WHEN getting the separator\n sep = samplesheet.get_separator(line)\n # THEN assert None is returned\n assert sep is None", "def fix_multiallelics(cell):\n\tsplitters = [',', ';']\n\tif any(splitter in str(cell) for splitter in splitters):\n\t\tcell = re.split(';|,', cell)[0]\n\treturn cell", "def make_separator_renderable(self):\n return self.get_separator_renderable_class()(parent_bem_block=self.get_bem_block())", "def process_group(row):\n splitted_name = row.name.split(extreme_separator)\n return sorted(splitted_name) + [row[2]]", "def test_asciitable_m_sep_char_in_cell(self):\n input = '''\n| Author | yada | yada2 | yada3 | yada4 | yada5 | yada6 | yada7 |\n├─────────────────┼─────────────┼─────────────────┼─────────────────┼─────────────────┼─────────────────┼────────────┼─────────────────┤\n│ Kelly Brazil │ │ a76d46f9ecb1eff │ kellyjonbrazil@ │ Fri Feb 4 12:14 │ refactor ignore │ 1644005656 │ │\n│ │ │ 4d6cc7ad633c97c │ gmail.com │ :16 2022 -0800 │ _exceptions │ │ │\n│ │ │ ec0e99001a │ │ │ │ │ │\n├─────────────────┼─────────────┼─────────────────┼─────────────────┼─────────────────┼─────────────────┼────────────┼─────────────────┤\n│ Kevin Lyter │ │ 6b069a82d0fa19c │ lyterk@sent.com │ Thu Feb 3 18:13 │ Add xrandr to l │ 1643940838 │ │\n│ │ │ 8d83b19b934bace │ │ :58 2022 -0800 │ ib.py │ │ │\n│ │ │ 556cb758d7 │ │ │ │ │ │\n├─────────────────┼─────────────┼─────────────────┼─────────────────┼─────────────────┼─────────────────┼────────────┼─────────────────┤\n│ Kevin Lyter │ │ 6b793d052147406 │ lyterk@sent.com │ Thu Feb 3 18:13 │ Clean up types │ 1643940791 │ │\n│ │ │ f388c4d5dc04f50 │ │ :11 2022 -0800 │ │ │ │\n│ │ │ 6a3456f409 │ │ │ │ │ │\n│ │ │ │ │ │ * | operator = │ │ │\n│ │ │ │ │ │ > Union[] │ │ │\n│ │ │ │ │ │ * Rem │ │ │\n│ │ │ │ │ │ ove unused impo │ │ │\n│ │ │ │ │ │ rt Iterator │ │ │\n│ │ │ │ │ │ * R │ │ │\n│ │ │ │ │ │ emove comment │ │ │\n├─────────────────┼─────────────┼─────────────────┼─────────────────┼─────────────────┼─────────────────┼────────────┼─────────────────┤\n│ Kevin Lyter │ │ ce9103f7cc66689 │ lyterk@sent.com │ Thu Feb 3 18:12 │ Delete old file │ 1643940766 │ │\n│ │ │ 5dc7840d32797d8 │ │ :46 2022 -0800 │ s in template f │ │ │\n│ │ │ c7274cf1b8 │ │ │ older │ │ │\n├─────────────────┼─────────────┼─────────────────┼─────────────────┼─────────────────┼─────────────────┼────────────┼─────────────────┤\n '''\n expected = [\n {\n \"author\": \"Kelly Brazil\",\n \"yada\": None,\n \"yada2\": \"a76d46f9ecb1eff\\n4d6cc7ad633c97c\\nec0e99001a\",\n \"yada3\": \"kellyjonbrazil@\\ngmail.com\",\n \"yada4\": \"Fri Feb 4 12:14\\n:16 2022 -0800\",\n \"yada5\": \"refactor ignore\\n_exceptions\",\n \"yada6\": \"1644005656\",\n \"yada7\": None\n },\n {\n \"author\": \"Kevin Lyter\",\n \"yada\": None,\n \"yada2\": \"6b069a82d0fa19c\\n8d83b19b934bace\\n556cb758d7\",\n \"yada3\": \"lyterk@sent.com\",\n \"yada4\": \"Thu Feb 3 
18:13\\n:58 2022 -0800\",\n \"yada5\": \"Add xrandr to l\\nib.py\",\n \"yada6\": \"1643940838\",\n \"yada7\": None\n },\n {\n \"author\": \"Kevin Lyter\",\n \"yada\": None,\n \"yada2\": \"ce9103f7cc66689\\n5dc7840d32797d8\\nc7274cf1b8\",\n \"yada3\": \"lyterk@sent.com\",\n \"yada4\": \"Thu Feb 3 18:12\\n:46 2022 -0800\",\n \"yada5\": \"Delete old file\\ns in template f\\nolder\",\n \"yada6\": \"1643940766\",\n \"yada7\": None\n }\n ]\n\n self.assertEqual(jc.parsers.asciitable_m.parse(input, quiet=True), expected)", "def fix_horizontal(line):\n\tline = line.rstrip()\n\tline = untabify(line, tab_width)\n\treturn line + '\\n'", "def indent(rows, hasHeader=False, headerChar='-', delim=' | ', justify='left',\n separateRows=False, prefix='', postfix='', wrapfunc=lambda x:x):\n # closure for breaking logical rows to physical, using wrapfunc\n def rowWrapper(row):\n newRows = [wrapfunc(item).split('\\n') for item in row]\n return [[substr or '' for substr in item] for item in map(None,*newRows)]\n\n\n # break each logical row into one or more physical ones\n logicalRows = [rowWrapper(row) for row in rows]\n # columns of physical rows\n columns = map(None,*reduce(operator.add,logicalRows))\n # get the maximum of each column by the string length of its items\n maxWidths = [max([len(str(item)) for item in column]) for column in columns]\n rowSeparator = headerChar * (len(prefix) + len(postfix) + sum(maxWidths) + \\\n len(delim)*(len(maxWidths)-1))\n # select the appropriate justify method\n justify = {'center':str.center, 'right':str.rjust, 'left':str.ljust}[justify.lower()]\n\n\n output=StringIO()\n if separateRows: print >> output, rowSeparator\n for physicalRows in logicalRows:\n for row in physicalRows:\n print >> output, \\\n prefix \\\n + delim.join([justify(str(item),width) for (item,width) in zip(row,maxWidths)]) \\\n + postfix\n if separateRows or hasHeader: print >> output, rowSeparator; hasHeader=False\n return output.getvalue()", "def __init__(self,\n row=None,\n separator='',\n skip_empty=False,\n max_column_width=None):\n self._widths = []\n self._subtable = None\n self._max_column_width = max_column_width\n if row:\n for i in range(len(row)):\n self._ProcessColumn(i, row, len(separator), skip_empty)", "def endTableRow(self):\r\n self.text += \"</tr>\"\r\n if self.verbosity >= 1 : print \" \"", "def line(self,i):\n ans = ' '.join(self._colentries[i])+'\\n'\n return ans", "def tabulate(items: typing.List[str]):\n rows, columns = find_shape(len(items))\n extra = (rows * columns) - len(items)\n items += [' '] * extra\n items = [\n [f'{items[i][0]}-{items[i + columns - 1][0]}', *items[i:i + columns]]\n for i in range(0, len(items), columns)\n ]\n items = [[column[i] for column in items] for i in range(columns + 1)]\n items = ['| ' + ' | '.join(row) + ' |' for row in items]\n items.insert(1, ('| --- ' * rows) + '|')\n return '\\n'.join(items)", "def add_row(self, *column_data):\n if not self.col_widths:\n self.col_widths = [len(data) for data in column_data]\n row = \"| \"\n row += \" | \".join(f\"{data:{w}}\"\n for (data, w) in zip(column_data, self.col_widths))\n row += \" |\\n\"\n self.result += row", "def headers_processor(headers):\n def apply_headers(row_set, row):\n _row = []\n pairs = izip_longest(row, headers)\n for i, (cell, header) in enumerate(pairs):\n if cell is None:\n cell = Cell(None)\n cell.column = header\n if not cell.column:\n cell.column = \"column_%d\" % i\n cell.column_autogenerated = True\n _row.append(cell)\n return _row\n return apply_headers", "def 
blank_line_before_underline(): # noqa: D416", "def escape_rows(self, rows: list):\n def to_tuple(values):\n rv = []\n for column, value in zip(self.columns, values):\n rv.append(self.columns.get(column).escape(value))\n return tuple(rv)\n\n for idx, row in enumerate(rows):\n rows[idx] = '({})'.format(', '.join(map(str, to_tuple(row))))\n\n return rows", "def create_numbers_table():\n work_tuples = parse_columns()\n print('\\n\\n\\n ----- Tableau récapitulatif -----')\n print('-----------------------')\n for ii in work_tuples:\n line = '|'\n for ij in ii:\n line += ' ij |'\n print(line)\n print('-----------------------')", "def numberize(\n self, text_row: Optional[List[str]], token_row: Optional[List[List[str]]]\n ):\n raise NotImplementedError", "def _generate_rowklass(self):\n header = six.next(self.resolved)\n clean = []\n for h in header:\n underscoreless = h.strip().lower().replace(' ', '_').replace('.', '_')\n specialless = underscoreless.replace('(', '').replace(')', '').replace('?', '').replace('-', '')\n if specialless == '':\n clean.append(specialless)\n continue\n try:\n num = int(specialless[0])\n numbers = {1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five',\n 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine', 10: 'ten'}\n numless = numbers[num] + specialless[1:]\n cleaned = numless\n except ValueError:\n cleaned = specialless\n\n more = 1\n while cleaned in clean:\n more += 1\n cleaned += str(more)\n\n clean.append(cleaned)\n\n for i, v in enumerate(clean):\n if v == '':\n clean[i] = 'field_' + str(i)\n self.rowklass = collections.namedtuple('RowKlass', clean)", "def test_add_space_to_lines(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.0\", \"4.6\"),\n after_sel=(\"2.0\", \"4.7\"),\n command_name=\"add-space-to-lines\",\n )", "def test_add_tab_to_lines(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.0\", \"5.6\"),\n after_sel=(\"2.0\", \"5.10\"),\n command_name=\"add-tab-to-lines\",\n )", "def rehydrate_thematic_break(cls, next_token):\n return next_token.extracted_whitespace + next_token.rest_of_line + \"\\n\"", "def _render_tabstops(cls, code_lines):\n\t\trendered_lines = []\n\t\t\n\t\tfor line in code_lines:\n\t\t\trendered = ''\n\t\t\twhile '\\t' in line:\n\t\t\t\tpre, tab, line = line.partition('\\t')\n\t\t\t\trendered += pre\n\t\t\t\trendered += ' '*(cls.TAB_STOP - (len(rendered) % cls.TAB_STOP))\n\t\t\trendered += line\n\t\t\trendered_lines.append(rendered)\n\t\treturn rendered_lines", "def _pipe_line_with_colons(colwidths, colaligns):\n if not colaligns: # e.g. 
printing an empty data frame (github issue #15)\n colaligns = [\"\"] * len(colwidths)\n segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]\n return \"|\" + \"|\".join(segments) + \"|\"", "def __format_lines(cls, lines):\n\n result = []\n\n for line in [x for x in lines if x]:\n if not line.startswith(\"#\"):\n if \"#\" in line:\n line = line[: line.find(\"#\")]\n\n if \"\\t\" in line or \" \" in line:\n splited_line = line.split()\n\n for element in splited_line[:1]:\n if element:\n line = element\n break\n result.append(line)\n\n return result", "def autosplit(self):\n result = RowSet()\n for row in self:\n result.append(row.autosplit())\n return result", "def _line_wrapper( self, diffs ):\n\n\t\t# pull from/to data and flags from mdiff iterator\n\t\tfor fromdata, todata, flag in diffs:\n\t\t\t# check for context separators and pass them through\n\t\t\tif flag is None:\n\t\t\t\tyield fromdata, todata, flag\n\t\t\t\tcontinue\n\t\t\t( fromline, fromtext ), ( toline, totext ) = fromdata, todata\n\t\t\t# for each from/to line split it at the wrap column to form\n\t\t\t# list of text lines.\n\t\t\tfromlist, tolist = [], []\n\t\t\tself._split_line( fromlist, fromline, fromtext )\n\t\t\tself._split_line( tolist, toline, totext )\n\t\t\t# yield from/to line in pairs inserting blank lines as\n\t\t\t# necessary when one side has more wrapped lines\n\t\t\twhile fromlist or tolist:\n\t\t\t\tif fromlist:\n\t\t\t\t\tfromdata = fromlist.pop( 0 )\n\t\t\t\telse:\n\t\t\t\t\tfromdata = ( '', ' ' )\n\t\t\t\tif tolist:\n\t\t\t\t\ttodata = tolist.pop( 0 )\n\t\t\t\telse:\n\t\t\t\t\ttodata = ( '', ' ' )\n\t\t\t\tyield fromdata, todata, flag", "def _get_separator(num, sep_title, sep_character, sep_length):\n left_divider_length = right_divider_length = sep_length\n if isinstance(sep_length, tuple):\n left_divider_length, right_divider_length = sep_length\n left_divider = sep_character * left_divider_length\n right_divider = sep_character * right_divider_length\n title = sep_title.format(n=num + 1)\n\n return \"{left_divider}[ {title} ]{right_divider}\\n\".format(\n left_divider=left_divider, right_divider=right_divider, title=title\n )", "def get_separator(self):\r\n \r\n return self._separator", "def strip_trailing_rows(self):\n\n rows = list()\n strip_mode = True\n for rownum, row in enumerate(reversed(self.cells)):\n len_row = len(row)\n num_empty = len([cell for cell in row if cell.type == 'empty'])\n if num_empty != len_row:\n strip_mode = False\n\n if num_empty == len_row and strip_mode:\n continue\n rows.append(row)\n\n self.cells = list(reversed(rows))" ]
[ "0.62396944", "0.61622435", "0.5958048", "0.5942915", "0.5919921", "0.59172857", "0.5905476", "0.58589554", "0.5857645", "0.58561033", "0.58199805", "0.57991666", "0.57600623", "0.5754019", "0.5714601", "0.56919146", "0.5561975", "0.5558327", "0.5547988", "0.5547977", "0.55221075", "0.55208313", "0.54971796", "0.5481213", "0.5481213", "0.5470328", "0.54496425", "0.54412127", "0.5439893", "0.541674", "0.54109025", "0.5408782", "0.54075766", "0.5406561", "0.53815955", "0.53536886", "0.5338386", "0.5321656", "0.5320227", "0.5307088", "0.5295557", "0.5283749", "0.527784", "0.52751607", "0.5273297", "0.5253362", "0.52450895", "0.5237979", "0.5236584", "0.5233123", "0.52287227", "0.52261364", "0.5223572", "0.5220899", "0.521962", "0.5214442", "0.5211014", "0.52090716", "0.5188285", "0.5185003", "0.51588535", "0.5158718", "0.5149573", "0.5147091", "0.5144098", "0.5144098", "0.5140586", "0.51353437", "0.5134117", "0.5130352", "0.5127502", "0.5124118", "0.51218575", "0.51204133", "0.511239", "0.51116073", "0.5103484", "0.5102479", "0.51020354", "0.5095294", "0.5071324", "0.5067817", "0.50661737", "0.50639856", "0.50601804", "0.5056067", "0.5053171", "0.50527585", "0.5032278", "0.50257313", "0.5025215", "0.5019124", "0.50182986", "0.50141644", "0.501392", "0.5012042", "0.500494", "0.49989796", "0.49967176", "0.49946946", "0.4988794" ]
0.0
-1
Save the DataSets as CSV
def save_datasets(ds, pdf_name, directory=''):
    print(f'{directory}{pdf_name.split("/")[-1].replace(".pdf", "")}_0..{len(ds)}.csv')
    list(map(
        lambda tb: tb[1].to_csv(
            f'{directory}{pdf_name.split("/")[-1].replace(".pdf", "")}_{tb[0]}.csv',
            index=False),
        enumerate(ds)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_to_csv(self):\n path = partial(os.path.join, 'datasets')\n save_name = self.name.lower().replace(' ', '_')\n self.df['values'].sum(axis=1).to_csv(path('{0}_values.csv'.format(save_name)))\n self.df['allocations'].to_csv(path('{0}_allocations.csv'.format(save_name)))\n self.df['returns'].to_csv(path('{0}_returns.csv'.format(save_name)))\n self.trades.to_csv(path('{0}_trades.csv'.format(save_name)))", "def save_csv(self, filename): # DONE\n self.data.to_csv(filename)", "def _save_data(self):\n self.data.to_csv('data/c&le/{}'.format(self.name))", "def save_dataset_csv(self, path):\n cols = list(self.data_dict.keys())\n df = pd.DataFrame(self.data_dict, index=None, columns=cols)\n df.to_csv(path, index=True)", "def export_to_csv(self, csv_path, data_sets=None, data_sets_operations=None):\n if not data_sets:\n data_sets = self._data_sets.keys()\n with open(csv_path, \"wb\") as csv_file:\n for data_set in data_sets:\n data = [data_set] + [str(x) for x in self._data_sets[data_set]]\n csv_file.write(\",\".join(data) + \"\\n\")\n for operation, operand1, operand2 in data_sets_operations:\n data1 = self._data_sets[operand1]\n data2 = self._data_sets[operand2]\n data = [\"%s %s %s\" % (operand1, operation, operand2)] + \\\n [str(eval(\"%s %s %s\" % (couple[0], operation, couple[1]))) for couple in zip(data1, data2)]\n csv_file.write(\",\".join(data) + \"\\n\")", "def __save_datasets(self):\n self.train.to_csv('{}/{}/{}'.format(path_to_train_set, img_format, 'train.csv'))\n self.valid.to_csv('{}/{}/{}'.format(path_to_valid_set, img_format, 'valid.csv'))\n self.test.to_csv('{}/{}/{}'.format(path_to_test_set, img_format, 'test.csv'))", "def save(self, data, outpath):\n data.to_csv(outpath)", "def write(self):\n \n self.df.to_csv('/home/austin/Desktop/Falcon/realestate/Falcon/Datasets/mls.csv')", "def save(self):\n\t\t# save self.dfAnalysis\n\t\tcsvPath = self._getSavePath()\n\t\tprint('saving:', csvPath)\n\t\tself.dfAnalysis.to_csv(csvPath)", "def save_clean_data(self):\n for data in self.clean_data:\n file_name = \"../data/clean_data/\" + data.file.name + data.file.extension\n data.save_csv(file_name)", "def saveCSV(self):\n filename=tkFileDialog.asksaveasfilename(defaultextension='.csv',\n initialdir=os.getcwd(),\n filetypes=[(\"csv\",\"*.csv\"),(\"All files\",\"*.*\")])\n if not filename:\n return\n for m in self.matrices:\n matrix = self.matrices[m] \n if matrix != None: \n c=matrix.csvRepresentation()\n f=open(filename,'w')\n f.write(c)\n f.close()\n return", "def to_csv(self, path):\n for table in ['datasets', 'dataruns', 'hyperpartitions', 'classifiers']:\n df = pd.read_sql('SELECT * FROM %s' % table, self.session.bind)\n df.to_csv(os.path.join(path, '%s.csv' % table), index=False)", "def to_csv(self, dataset):\n save_as = filedialog.asksaveasfilename(defaultextension='.csv')\n try:\n with open(save_as, 'w', newline='') as file:\n scribe = csv.writer(file)\n scribe.writerow(HEADERS)\n for row in dataset:\n scribe.writerow(row.values())\n self.info_success(save_as)\n except IOError:\n self.info_error()\n return", "def save(self):\r\n self.df_app_data = self.df_app_data.to_csv(\"app_data.csv\", index=False)", "def _write_csv(self):\n\n # add the label to the header\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self.header.append('Date')\n else:\n self.header.append('sample id')\n\n key_list = []\n\n for i, cube in enumerate(self.cube_list):\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self._write_sample_with_date(cube, i, key_list)\n else:\n 
self._write_sample(cube, i, key_list)\n\n output_data_file_path = self._get_full_file_name()\n self._write_data_dict(output_data_file_path, key_list)\n\n return [output_data_file_path]", "def save_datasets(self):\n if self.processed_extension == '.csv':\n # Save to csv\n logger.info(f'Saving sets to csv:')\n \n # TRAIN\n logger.info(f'train: {self.train_path}')\n \n # Concatenate X and y\n train_data = self.train_data[0]\n train_data['TARGET'] = self.train_data[1]\n \n # Save as csv\n train_data.to_csv(self.train_path, index = False)\n \n \n # VAL\n logger.info(f'val: {self.val_path}')\n \n # Concatenate X and y\n val_data = self.val_data[0]\n val_data['TARGET'] = self.val_data[1]\n \n # Save as csv\n val_data.to_csv(self.val_path, index = False)\n \n # TEST\n logger.info(f'test: {self.test_path}')\n \n # Concatenate X and y\n test_data = self.test_data[0]\n test_data['TARGET'] = self.test_data[1]\n \n # Save as csv\n self.test_data.to_csv(self.test_path, index = False)\n \n elif self.processed_extension == '.npz':\n # Convert y to numpy array\n if isinstance(self.train_data[1], pd.Series):\n self.train_data[1] = self.train_data[1].to_numpy()\n if isinstance(self.val_data[1], pd.Series):\n self.val_data[1] = self.val_data[1].to_numpy()\n if isinstance(self.test_data[1], pd.Series):\n self.test_data[1] = self.test_data[1].to_numpy()\n \n # Save to npz (scipy sparse)\n logger.info(f'Saving sets to npz:')\n\n logger.info(f'train: {self.train_path}')\n train_data = [self.train_data[0], np.reshape(self.train_data[1], (-1,1))]\n sparse.save_npz(self.train_path, sparse.hstack(train_data))\n \n logger.info(f'val: {self.val_path}')\n val_data = [self.val_data[0], np.reshape(self.val_data[1], (-1,1))]\n sparse.save_npz(self.val_path, sparse.hstack(val_data))\n\n logger.info(f'test: {self.test_path}')\n test_data = [self.test_data[0], np.reshape(self.test_data[1], (-1,1))]\n sparse.save_npz(self.test_path, sparse.hstack(test_data))\n\n else:\n raise AttributeError(f'Wrong extension: {self.processed_extension}')\n \n self.input_size = self.train_data[0].shape[1]\n logger.info(f'Saved datasets.')", "def export_dataset(self):\n raise NotImplementedError", "def write_to_csv(self, data_points):\n keys = data_points[0].keys()\n with open(self.report_path, 'w') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(data_points)", "def to_csv(self, filename):\n self.data.to_csv(filename)", "def to_csv(self, filename):\n self.data.to_csv(filename)", "def save_data(self, output_file):\n self._remove_redundant_columns()\n self.dataframe.to_csv(output_file, sep=',', encoding='utf-8')", "def write_csv(self):\n self.tableView.df.to_csv('Data export.csv', index=False)\n print('CSV file exported')", "def create_csv(self):\n try:\n # Convert List of Lists to DataFrame and write it to a CSV\n pd.DataFrame(self.data, columns=self.header) \\\n .to_csv(os.path.join(self.file_path, self.file_name), index=False)\n self.successful_run = True\n except:\n # TODO create Exception Handling\n raise", "def save_items_to_csv(items_data: pd.DataFrame):\n with open('etsy_items.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerows(items_data)", "def _export_data_to_csv(self, source, target):\n self.log.info(f\"Dumping data into {target}\")\n source.to_csv(target, index=False)", "def save_csv(self, filename: str, type='n', **args):\n if type == 'n':\n df = self.export_nodes()\n else:\n df = self.export_edges()\n df.to_csv(filename, index=False)", "def 
export_data(self):\r\n \r\n \r\n output_file = 'export.csv'\r\n data = self.get_raw_data()\r\n \r\n if data != []:\r\n print('Writing to file', output_file)\r\n with open(output_file, 'w',) as csvfile:\r\n fluorescence_levels = csv.writer(csvfile)\r\n fluorescence_levels.writerow(['sensor_1','Time'])\r\n for i in data:\r\n fluorescence_levels.writerow(i)\r\n print('done')\r\n \r\n else:\r\n print('no recorded data')", "def generate_dataset_csv(request):\n\n response = csv_export(request,Dataset)\n return response", "def csv_output(self):\r\n fh = open(\"output.csv\",'w')\r\n for i in range(len(self.population.columns)):\r\n if i != len(self.population.columns)-1:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\"\\n\")\r\n\r\n for i in range(len(self.population.data)):\r\n for j in range(len(self.population.data[i])):\r\n if j != len(self.population.data[i])-1:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\"\\n\")\r\n fh.close()", "def to_csv(self, filename, **kwargs):\n self.data.to_csv(filename, **kwargs)", "def get_csv_string(self):\n df = None\n for d in self.data:\n if df is None:\n df = d.as_dataframe()\n else:\n df = df.append(d.as_dataframe())\n\n if df is None:\n return \"\"\n else:\n return df.to_csv(index=False)", "def save_values(self):\n f_name = self.img_path.split('.')[0] + '_{}_'.\\\n format(self.data_type_name) + '.csv'\n dir_name = os.path.join(self.base_dir, f_name)\n if not os.path.exists(dir_name):\n for data_list in self.converted_values():\n with open(f_name, 'a') as f:\n wr = csv.writer(f, delimiter=';')\n wr.writerow(data_list)\n else:\n os.remove(f_name)\n for data_list in self.converted_values():\n with open(f_name, 'a') as f:\n wr = csv.writer(f, delimiter=';')\n wr.writerow(data_list)", "def saveData(self):\n pdIds = pd.DataFrame.from_dict(self.pathIds, orient='index')\n pdCrr = pd.DataFrame.from_dict(self.pathCrr, orient='index', columns=['cid'])\n mergedData = pd.concat([pdIds, pdCrr['cid']], axis=1, ignore_index=False)\n\n # Create the save dialog box\n name, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File',\n '', 'csv files (*.csv)', 'csv file (*.csv)')\n\n if not name:\n return\n # Check the extension when saving\n if self.csvExt in name:\n mergedData.to_csv(name, header=False, index=True)\n else:\n message = 'Error saving file {}.'.format(name)\n self.messageBox(message)", "def save_to_file_csv(cls, list_objs):\n l = []\n if list_objs is not None:\n for item in list_objs:\n l.append(item.to_dictionary())\n with open(\"%s.csv\" % cls.__name__, mode='w') as f:\n f.write(Base.to_json_string(l))", "def toCsv(self, csv_path):\n ser = pd.Series(self)\n ser.to_csv(csv_path)", "def save_to_csv(self):\r\n # Save the read values to a csv file\r\n with open(self.fname, \"a\") as f:\r\n wr = csv.writer(f, dialect='excel')\r\n wr.writerow([self.set_time, self.read_time_P_ac, self.read_time_P_bat,\r\n self.soc0, self.set_val, self.P_ac, self.P_bat])", "def save_data_csv(self, filename):\n #add masked entry as last column\n fields = numpy.r_[self.colLabels, ['masked']]\n\n #add dynamic expression to column headers\n for k, col in enumerate(self.dynamic_cols):\n fields[col] += \" [%s]\"%self.dynamic_expressions[k] if self.dynamic_expressions[k] else ''\n\n #add custom labels to field names \n for col, fieldname in enumerate(fields):\n custom_label = self.column_labels_custom.get(col)\n 
fields[col] += \" (%s)\"%custom_label if custom_label else ''\n\n fields[col] += \" {*}\" if (col in self.colsel and (fieldname.find('user')==0 or col in self.dynamic_cols)) else ''\n \n #add options\n \n \n #don't save last two lines\n data = numpy.c_[self.data[:-2], self.rowmask[:-2]]\n\n with open(filename, 'wb') as f:\n import csv\n writer = csv.writer(f)\n writer.writerow(fields)\n #writer.writerows(data)\n for row in data:\n r = [entry.encode('latin_1') if type(entry) is types.UnicodeType else entry for entry in row]\n writer.writerow(r)\n self.modified = False", "def write_file(file):\n file.to_csv('data_set.csv', encoding='utf-8', index=False)", "def export_data(self):\r\n stocks = {}\r\n headings = ['Security', 'Price', 'Change', 'Change %', '52 Week', 'Market Cap']\r\n\r\n for data in range(6):\r\n for items in self.root.main.treeview.get_children():\r\n values = self.root.main.treeview.item(items, 'values')\r\n if headings[data] not in stocks:\r\n stocks[headings[data]] = []\r\n stocks.get(headings[data]).append(values[data])\r\n\r\n df = pd.DataFrame(stocks, columns=headings)\r\n path = tk.filedialog.asksaveasfilename(title='Save File As...',\r\n filetypes=((\"CComma-separated values (.csv)\", \"*.csv\"), (\"Text Document(.txt)\", \"*.txt\")))\r\n\r\n if not path:\r\n return\r\n else:\r\n df.to_excel(path, index=False, header=True)", "def save_csv(data): \n bank_data = data\n\n #Creating headers for the csv file\n header = [\"Lender\", \"Max Loan Amount\", \"Max LTV\", \"Max DTI\", \"Max Credit Score\", \"Interest Rate\"]\n\n #Creating output path of the CSV file\n csvpath = Path(\"save_file.csv\")\n\n #Opening the csv file in csvpath by using the open() method\n with open(csvpath, \"w\", newline='') as csvfile:\n\n csvwriter = csv.writer(csvfile, delimiter = \",\")\n csvwriter.writerow(header)\n for row in bank_data:\n csvwriter.writerow(row)\n\n return data", "def to_csv(self):\n if not self._fitted:\n self.fit()\n #self._message(\"Saving results into a csv (comma separated values) file.\")\n v=np.array([list(self.initialConcentration.values()),\n list(self.fitting_error.values()),\n list(self.k.values()),\n list(self.Fb.values()),\n list(self.slope.values())]).T\n k=list(self.initialConcentration.keys())\n d=pd.DataFrame(v,columns=['Initial Concentration','Fitting Error','k','Fb','Slope'],index=k)\n fn=get_valid_fname(self.ID)\n self.csvname=\"%s_initial_concentrations.csv\"%(fn)\n self.fullcsvname=\"%s/%s_initial_concentrations.csv\"%(self.info['resultsdir'],fn)\n self.info['csvname_initialConcentration']=self.csvname\n print(self.csvname)\n d.to_csv('%s/%s'%(self.info['resultsdir'],self.csvname))", "def save_file(self):\n # paginate over deputies and senators getting their fields\n fieldnames = set([])\n congressmen = self.deputies + self.senators\n for data in congressmen:\n fieldnames = fieldnames.union(data.dump().keys())\n\n\n with open(IDENTITY_FILE_UPDATED, 'a') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=list(fieldnames), delimiter=';')\n writer.writeheader()\n\n for data in congressmen:\n writer.writerow(data.dump())", "def export_data(self, pth):\n self.cleanup_allowed = False\n self.train_df.to_csv(os.path.join(pth, \"train.csv\"))\n self.valid_df.to_csv(os.path.join(pth, \"valid.csv\"))\n self.test_df.to_csv(os.path.join(pth, \"test.csv\"))", "def save_dataset(self):\n if self.res_dataset is None:\n return\n if self.write_path is None:\n raise Exception(\"Error: Attempted to save result dataset without ever specifiying a path to write to\")\n\n if 
self.format == \"arrow\":\n self.res_dataset.save_to_disk(self.write_path)\n elif self.format == \"csv\":\n self.res_dataset.to_csv(self.write_path, index = False)", "def export_csv(self):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".csv\",\n filetypes=((\"comma seperated values\", \"*.csv\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n tabledata = self.tabs.window.aistracker.create_table_data()\n export.write_csv_file(tabledata, outputfile)\n else:\n raise ExportAborted('Export cancelled by user.')", "def save_csv(self):\n path, _ = QtWidgets.QFileDialog.getSaveFileName(self, '保存数据', '', 'CSV(*.csv)')\n\n if not path:\n return\n\n with open(path, 'w') as f:\n writer = csv.writer(f, lineterminator='\\n')\n\n writer.writerow(self.headers.keys())\n\n for row in range(self.rowCount()):\n row_data = []\n for column in range(self.columnCount()):\n item = self.item(row, column)\n if item:\n row_data.append(str(item.text()))\n else:\n row_data.append('')\n writer.writerow(row_data)", "def save_csv(self):\n if not self.__is_csv():\n # creates the csv file if it did not exist.\n self.__create_csv()\n try:\n with open(self.__csv_file_name, 'a', newline='', encoding='utf-8') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\n writer.writerow(self.__values)\n except IOError: # this exception avoid a product does not have saved in csv file\n time.sleep(0.5)\n self.save_csv()\n # display on the screen what is being record on csv\n for key, value in self.__values.items():\n print('{}: {}'.format(key, value), end='; ' if key != 'url' else '\\n')", "def write_to_csv(self, data):\n with open(\"out.csv\", \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerow(self.column_names)\n writer.writerows(data)\n print(\" Updated succesfully \")", "def dbtocsv():\n connection = sqlite3.connect(\"sensordata.db\")\n cursor = connection.cursor()\n cursor.execute(\"Select * from sensordata\")\n roadstationdata = cursor.fetchall()\n\n with open('roadstationdata.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['id','name','value','unit','time'])\n writer.writerows(roadstationdata)", "def export_database(self):\n base_path = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', filter='CSV (*.csv)')\n database.export_to_csv(DB_PATH, base_path[0])", "def to_csv(self, csvwriter):\n csvwriter.writerow(self.to_csv_row())", "def save_combined_clean_data(self):\n df = []\n for data in self.clean_data:\n df.append(data.df)\n df = pd.concat(df, axis=0, join='outer', ignore_index=False, keys=None,\n levels=None, names=None, verify_integrity=False, copy=True)\n file_name = \"../data/clean_data/\" + \"combined_clean_data + \" + '.csv'\n df.to_csv(file_name, sep=\";\", index=False)\n\n return(df)", "def save_to_file_csv(cls, list_objs):\n list_dictionaries = []\n if list_objs is None or list_objs == []:\n string_dictionary = \"[]\"\n else:\n for _obj_dict in list_objs:\n list_dictionaries.append(_obj_dict.to_dictionary())\n string_dictionary = Base.to_json_string(list_dictionaries)\n with open(cls.__name__ + \".csv\", \"w\") as _file:\n _file.write(string_dictionary)\n _file.close()", "def save_csv_file():\n global output_on_display, import_lst, column_names, data\n if data_base == '':\n mistake_load_table()\n else:\n column_names = data[0]\n save_name = asksaveasfilename(title=\"Select file\", filetypes=((\"CSV\", \"*.csv\"), (\"all files\", \"*.*\")),\n confirmoverwrite=True, defaultextension='.csv')\n step = 
len(column_names)\n data_csv = import_lst\n if len(data_csv[0]) == step:\n pass\n else:\n data_csv = import_lst[step::]\n\n with open(save_name, 'w+') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(column_names)\n csv_writer.writerows(data_csv)", "def save(self):\n assert self.data is not None\n with open(self._csv_path, mode=\"w\", encoding=\"utf-8\") as spp_file:\n # We don't want to save the index, as it's not especially meaningful, and makes life harder when trying to\n # restore the binary version from the csv (the index column would be imported and then need to be dropped).\n self.data.to_csv(spp_file, index=False)", "def save_csv(self, save_path=''):\n if not save_path:\n time = datetime.now()\n time = datetime.strftime(time, '%Y-%m-%d_%H:%M:%S')\n filename = time + '.csv'\n save_path = os.path.join(os.path.abspath(os.curdir), filename)\n data = self._get_data()\n with open(save_path, 'wb') as f:\n for line in data:\n f.write(line + '\\n')", "def save(self, close=True):\n rows = []\n # find out how many rows we're going to need to write\n max_rows = 0\n for _, cont in self.data:\n if len(cont) > max_rows:\n max_rows = len(cont)\n max_rows += 1 # add the header row\n\n for i in range(0, max_rows):\n row = []\n for (col_name, col_contents) in self.data:\n col_data = [col_name] + col_contents\n if len(col_data) > i:\n row.append(col_data[i])\n else:\n row.append(\"\")\n rows.insert(i, row)\n\n # Remove current contents of file\n self.file_object.seek(0)\n self.file_object.truncate()\n\n # Write new CSV data\n writer = UnicodeWriter(self.file_object, encoding=self.output_encoding)\n writer.writerows(rows)\n\n if close:\n self.file_object.close()", "def save_csv(outputfile):\n with open(outputfile, 'w', newline='') as outfile:\n writer = csv.writer(outfile)\n writer.writerow(DATA_KEYS)\n\n # Add data to csv-file\n for data in data_list:\n writer.writerow(data)", "def to_csv(self, save_folder: Path) -> None:\n serializer = serializer_factory(fmt=SerializerEnum.CSV)\n serializer.dump(instance=self, folder_path=save_folder)", "def save_report_data(results):\n if os.path.isfile(FEED_DATA_FILE):\n pass\n\n csv_file = open(FEED_DATA_FILE, 'wt', encoding='utf-8')\n writer = csv.writer(csv_file, lineterminator='\\n')\n\n for report in results.get('reports', []):\n column_header = report.get('columnHeader', {})\n dimension_headers = column_header.get('dimensions', [])\n metric_headers = column_header.get(\n 'metricHeader', {},\n ).get('metricHeaderEntries', [])\n rows = report.get('data', {}).get('rows', [])\n\n header_row = []\n header_row.extend(dimension_headers)\n header_row.extend([mh['name'] for mh in metric_headers])\n\n logger.debug(header_row)\n writer.writerow(header_row)\n\n for row in rows:\n dimensions_data = row.get('dimensions', [])\n access_date = ''.join(dimensions_data[0])\n _date: date = datetime.strptime(access_date, '%Y%m%d').date()\n metrics_data = [m['values'] for m in row.get('metrics', [])][0]\n\n data_row: List[str] = [str(_date)]\n data_row.extend(metrics_data)\n logger.debug(data_row)\n writer.writerow(data_row)\n\n # Close the file.\n csv_file.close()", "def SaveToCSV(self):\n import csv \n csvfile = open(f\"Cache/{self.symbol}.csv\", \"w\", newline='')\n writer = csv.writer(csvfile, delimiter=',')\n writer.writerow([self.symbol, self.name, self.market])\n writer.writerow(['Latest P/E Ratio:', self.pe_ratio])\n writer.writerow(['Short Percent of Float:', self.short_percent_of_float])\n writer.writerow(['Date', 'Price', 'Dividend', 'Annualized 
Dividend'])\n for snapshot in self._history:\n writer.writerow([snapshot.date.strftime(\"%m/%d/%Y\"), snapshot.price, snapshot.dividend, snapshot.annualDividend])\n csvfile.close()\n print(f\"{self.name} saved to /Cache/{self.symbol}.csv\")", "def to_csv(self, path):\n results = self.all()\n if self.stop_check is not None and self.stop_check():\n return\n results.to_csv(path)", "def file(self):\n result = []\n completePath = CompletePath(self.path, self.filename) \n with open(completePath.path(), 'w', newline='') as csvfile:\n fieldnames = ['Activity', 'Points']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for i in range ( len( self.groupPriority.rows() ) ):\n tmp = self.groupPriority.rows()[i]\n self.log.info ( \"FinalCSV\", \"file\",\"data {0},{1}\".format( tmp.activity(), tmp.points() ) )\n writer.writerow({'Activity': tmp.activity(), 'Points': tmp.points()})\n self.log.info(\"FinalCSV\", \"file\", \"Elaborated file: {0}\".format ( completePath.path() ) )", "def export_sensor_data_to_csv(self):\n df = pd.read_sql('SELECT * FROM sensor_data', self.conn)\n df.to_csv('output/sensor_data.csv', index=False)", "def dump_gazettes_as_csv(self):\n # TODO: dump_gazettes_as_csv\n pass", "def store_csv(self):\n\n with open(self.filepath.with_suffix(\".csv\"), 'w',\n newline='') as csvfile:\n fieldnames = ['counter', 'timestamp', 'acceleration']\n writer = DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n writer.writerows(self.values)", "def save_to_file_csv(cls, list_objs):\n with open(cls.__name__ + \".csv\", \"w\", newline='') as f:\n if cls.__name__ == \"Rectangle\":\n fieldnames = ['id', 'width', 'height', 'x', 'y']\n elif cls.__name__ == \"Square\":\n fieldnames = ['id', 'size', 'x', 'y']\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n if list_objs is not None:\n for model in list_objs:\n writer.writerow(model.to_dictionary())", "def to_csv(self, path):\n if os.path.isdir(path):\n shutil.rmtree(os.path.join(path))\n os.makedirs(path)\n\n for name, df in self.input_data.items():\n name += \".csv\"\n filename = os.path.join(path, name)\n df.to_csv(filename)\n logging.info(\"Scenario saved as csv-collection to %s\", path)", "def save_csv(outfile, movies):\n writer = csv.writer(outfile)\n writer.writerow(['Title', 'Rating', 'Year', 'Actors', 'Runtime'])\n for movie in movies:\n writer.writerow(movie)\n\n # ADD SOME CODE OF YOURSELF HERE TO WRITE THE MOVIES TO DISK", "def save_as_csv(self, DBNs):\n with open(\"output.csv\", \"w\") as outfile:\n # create the headers\n for i in range(0, 5):\n outfile.write(self._headers[i] + \",\") # delimits header names\n\n # moves to next line\n outfile.write(self._headers[5] + \"\\n\")\n\n # populates information\n for data in self._sat:\n if data[8] in DBNs:\n outfile.write(data[8] + \",\")\n if \",\" in data[9]:\n outfile.write(\"\\\"\"+data[9]+\"\\\"\" + \",\")\n else:\n outfile.write(data[9] + \",\")\n outfile.write(\",\".join([data[i] for i in range(10,14)]) + \"\\n\")", "def save_dataframe(dataframe, filename):\n with open(filename, \"w\", encoding=\"utf8\") as outfile: \n dataframe.to_csv(outfile, sep=\",\")", "def save_data_list(self, file_name):\n x = file_name + '__LinkList.csv'\n f = SAVING_PATH + x\n print('\\nSaving data into a CSV file [{0}]...'.format(x))\n self.webDataFrame.to_csv(f, index=False)\n\n print('Finished writing the webiste data list file: {0}\\n'.format(\n os.path.abspath(f)))", "def write_output_file(ad_models):\n\n with 
open('output-data-utf8.csv', 'w', newline='', encoding='UTF-8') as output_file:\n csv_writer = csv.writer(output_file, delimiter=',')\n for ad in ad_models:\n csv_writer.writerow((ad.date.strftime('%Y/%m/%d'), ad.country_code, ad.impression, ad.clicks))", "def save_csv(outfile, cities):\n writer = csv.writer(outfile)\n writer.writerow(['Name'])\n for row in cities:\n writer.writerow([row])", "def write_csv(self, outfile, collapse_orders=False, show_age=False):\r\n # Write header row\r\n outfile.write(self.get_csv_header(collapse_orders, show_age).encode())\r\n\r\n # Write content\r\n for x in self.records:\r\n x.write_csv(outfile, collapse_orders, show_age)", "def save_performances(self):\r\n nb_datasets = len(self.results)\r\n resu = [[] for k in range(nb_datasets)]\r\n\r\n # fetch results\r\n for k in range(nb_datasets):\r\n best = np.argmax(self.results[k]['mean_test_score'])\r\n resu[k].append(('score', self.results[k]['mean_test_score'][best]))\r\n resu[k] = resu[k] + list(self.results[k]['params'][best].items())\r\n\r\n # write results in csv\r\n for k, resu in enumerate(resu):\r\n with open('results/final_results_{}.csv'.format(k), 'a') as file:\r\n writer = csv.writer(file)\r\n writer.writerow(resu)", "def WriteDataFrames(self, Outpath):\n\n newdataframes = self.newdataframes\n for staname in newdataframes.keys():\n fname = staname + '.TXT'\n newdataframes[staname].to_csv(Outpath + fname, float_format=\"%.2f\")\n print('--------------------')\n print('Writing dataframe')\n print('--------------------')", "def export_sampleStorage_csv(self, sample_ids_I, filename_O):\n\n data_O = [];\n for sample_id in sample_ids_I:\n data_tmp =[];\n data_tmp = self.get_rows_sampleID_limsSampleStorage(sample_id);\n data_O.extend(data_tmp);\n if data_O:\n io = base_exportData(data_O);\n io.write_dict2csv(filename_O);", "def create_model_csv(self):\n\n self.model_df.to_csv(self.model_output_file)", "def save_to_file_csv(cls, list_objs):\n ld = []\n with open(cls.__name__ + \".csv\", \"w\", encoding=\"utf-8\") as f:\n if list_objs:\n for obj in list_objs:\n if cls.__name__ == 'Rectangle':\n ld.append([\n obj.id, obj.width, obj.height, obj.x, obj.y])\n if cls.__name__ == 'Square':\n ld.append([obj.id, obj.size, obj.x, obj.y])\n writer = csv.writer(f)\n for row in ld:\n writer.writerow(row)", "def save_as_csv(time_series, data, path_and_file_name):\n\n parent_name = \"test\"\n parent_uqid = uuid.uuid4()\n\n file_obj = open(path_and_file_name, 'w')\n file_obj.write('version,'+str(2)+'\\n')\n file_obj.write('numOfCH,'+str(1)+'\\n')\n file_obj.write('type, scan\\n')\n file_obj.write('ch_type,'+str(0)+'\\n')\n\n file_obj.write('carpet pos,'+str(0)+'\\n')\n file_obj.write('parent_name,'+str(parent_name)+'\\n')\n file_obj.write('parent_uqid,'+str(parent_uqid)+'\\n')\n file_obj.write('parent_filename,'+str(path_and_file_name)+'\\n')\n\n file_obj.write('pc, 0\\n')\n file_obj.write('Time (ns), CH0 Auto-Correlation\\n')\n for time_step in range(0, time_series.shape[0]):\n file_obj.write(str(float(time_series[time_step]))+','+str(data[time_step])+ '\\n')\n file_obj.write('end\\n')\n\n file_obj.close()", "def mscoco_to_csv(sentence_sets, output_file_name):\n mscoco_data = {}\n for i in range (0, 2):\n mscoco_data[\"sentence\" + str(i)] = [subset[i] for subset in sentence_sets]\n mscoco_df = pd.DataFrame(data=mscoco_data)\n mscoco_csv = mscoco_df.to_csv(output_file_name, index=False)\n return mscoco_csv", "def save_reviews_to_csv(language, review_list, dataset):\n with open('reviews_'+dataset+'_'+language+'.csv', 
'w') as csvfile:\n fieldnames = review_list[0].__dict__.keys()\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n for review in review_list:\n writer.writerow(review.__dict__)", "def to_csv(data_path):\n news_df, price_df = load_data(data_path)\n\n combined_df = combine_stock_news(news_df, price_df)\n\n combined_df.to_csv(data_path + \"news_price_df.csv\")", "def writetoCSV(self, fileName):\n\n with open(fileName, 'w') as writeFile:\n writeFile.write(\"ID,Fx,Fy,Fz\\n\")\n for fstnr in F:\n writeFile.write(str(fstnr.ID))\n for i in fstnr.force:\n writeFile.write(',' + str(i))\n writeFile.write('\\n')", "def export_to_csv(self, request, queryset):\n fields = self.get_table_fields()\n field_names = [field.name for field in fields]\n field_verbose_names = [field.verbose_name.encode(\n 'utf-8'\n ) for field in fields]\n\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; \\\nfilename=%s.csv' % unicode(self.model._meta).replace('.', '_')\n\n writer = csv.writer(response)\n writer.writerow(field_verbose_names)\n for obj in queryset:\n writer.writerow([unicode(getattr(obj, field)).encode(\n \"utf-8\",\n \"replace\"\n ) for field in field_names])\n return response", "def save_to_csv(data):\n print(\"Saving file...\")\n\n data = [\"year,rank,company,revenue ($ millions),profit ($ millions)\"] + data\n data = [row.replace(\", \", \"; \").replace(\"\\\"\", \"\") for row in data] # list comprehension\n\n with open(CSV_PATH, \"w\", newline=\"\", encoding=\"utf-8\") as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=\",\")\n for row in data:\n spamwriter.writerow(row.split(\",\"))", "def __create_output_csv(self, df, score_list, elapsed_list):\n df['Similar']=score_list\n df['Elapsed']=elapsed_list\n df.to_csv('Output.csv',index=False)\n return df", "def export_to_csv(da_locals, selection_widget, out):\n df_name = selection_widget.value\n da_locals[df_name].to_csv(\"output/{}.csv\".format(df_name), index=False)\n out.clear_output()\n out.append_display_data(FileLinks(\"output\"))", "def save_csv(entities):\n project_root = os.environ['PYTHONPATH']\n\n with open('{}/data/processed/fda_tags.csv'.format(project_root), 'w', newline=\"\") as output_file:\n writer = csv.writer(output_file)\n for entity, file_name in entities.items():\n writer.writerow([entity, file_name])", "def save_csv(filename, save_list):\n with open(filename, mode='w') as csv:\n csv.writelines([','.join(item) + '\\n' for item in save_list])", "def export_data(self):\n folder = os.path.dirname(self.filename[0])\n filename_ext = os.path.basename(self.filename[0])\n filename = os.path.splitext(filename_ext)[0] #get filename without extension\n\n path = folder + \"/\" + filename + \"_fit_results.txt\"\n if not os.path.exists(path):\n file = open(path, \"w+\")\n else:\n file = open(path, \"a+\")\n\n for i in range(len(self.data_list)):\n file.write(self.data_list[i] + \"\\n\\n\")\n\n self.data_list = []\n file.close()", "def _csv_export(self, exppath):\n with open(exppath, 'w') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',', skipinitialspace=True)\n csvwriter.writerow(['hexstr','dmc','name'])\n for clr in self.lookup_table:\n csvwriter.writerow([clr.hex.to_str(), clr.id, clr.name])", "def _write(self):\n # Reload\n with portalocker.Lock(self.filename, 'w') as fh:\n self.data.to_csv(fh, index=False)\n fh.flush()\n os.fsync(fh.fileno())", "def save_to_file_csv(cls, list_objs):\n f_name = cls.__name__ + \".csv\"\n with open(f_name, 'w', 
newline='') as f:\n if list_objs is None or list_objs == []:\n f.write(\"[]\")\n\n else:\n if cls.__name__ == 'Rectangle':\n h = ['id', 'width', 'height', 'x', 'y']\n else:\n h = ['id', 'size', 'x', 'y']\n ncsv = csv.DictWriter(f, fieldnames=h)\n for obj in list_objs:\n ncsv.writerow(obj.to_dictionary())", "def save_csv(companies):\n print(\"Saving companies.csv...\")\n\n Path(\"output\").mkdir(parents=True, exist_ok=True)\n file_name = 'output/companies.csv'\n\n with open(file_name, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n i = 0\n while i < 500:\n company = companies[i]\n name = company.text\n url = company.get_attribute('href')\n writer.writerow([name, url])\n i = i + 1\n \n print('companies.csv created')", "def to_csv_file_obj(self, rows):\n output = StringIO.StringIO()\n writer = csv.writer(output)\n writer.writerows(rows)\n return output", "def download_report():\n entities = get_names()\n save_csv(entities)", "def save_entries(self):\n with open(self.file_name, \"w\") as file:\n file.write('date,name,minutes,note\\n')\n for entry in self.entries:\n writer = csv.writer(file)\n writer.writerow([entry.date, entry.name, entry.minutes, entry.note])", "def writeCSV(filename, separator, data):\n \n filetowrite = open(filename, \"w\")\n values = []\n i = 0 #Count the number of objects already written\n for item in data:\n filetowrite.write(item)\n i += 1\n if i < len(data.keys()):\n filetowrite.write(separator)\n values.append(data[item])\n filetowrite.write(\"\\n\")\n i = 0\n for value in values:\n filetowrite.write(str(value))\n i += 1\n if i < len(values):\n filetowrite.write(separator)\n \n filetowrite.close()" ]
[ "0.7957732", "0.79157484", "0.7566766", "0.7396046", "0.73250043", "0.7290871", "0.72067684", "0.71864974", "0.71633387", "0.7068782", "0.7042488", "0.7025496", "0.70245546", "0.69767153", "0.69759953", "0.69569284", "0.69508433", "0.69366264", "0.68940073", "0.68940073", "0.68903106", "0.6876174", "0.6870607", "0.6864478", "0.6853691", "0.6830742", "0.6778635", "0.67677474", "0.67514336", "0.6728555", "0.6713456", "0.6694881", "0.6660644", "0.6659994", "0.66529", "0.66495067", "0.6648191", "0.6643133", "0.66367924", "0.6624685", "0.6620208", "0.6613327", "0.6604277", "0.66028786", "0.660183", "0.66008776", "0.6583416", "0.65812254", "0.6580136", "0.657819", "0.65704995", "0.6561609", "0.65520865", "0.6547194", "0.65411586", "0.6527427", "0.6526692", "0.6525514", "0.6525279", "0.6514392", "0.64904046", "0.64879084", "0.64831525", "0.64752185", "0.64746654", "0.6472253", "0.6471616", "0.6470988", "0.64563036", "0.64554", "0.64313734", "0.64253217", "0.6424575", "0.6423252", "0.64145535", "0.64105517", "0.6399268", "0.6387162", "0.638598", "0.6378817", "0.637559", "0.637386", "0.63683134", "0.63658893", "0.6362019", "0.6361938", "0.6353671", "0.63506085", "0.63498944", "0.6335732", "0.6333448", "0.6327914", "0.63263327", "0.6321443", "0.6318597", "0.6316287", "0.6305996", "0.62987334", "0.62982833", "0.6298079" ]
0.6633306
39
Extract a table from a pdf file and save the resulting dataset
def extract_and_save(file_path, out_dir=''): save_datasets( extract_table(file_path), file_path, out_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_table(path):\n re_ex = RE_EX\n pages = []\n page_num = 1\n with open(path, 'rb') as in_file:\n parser = PDFParser(in_file)\n doc = PDFDocument(parser)\n for page in PDFPage.create_pages(doc):\n rsrcmgr = PDFResourceManager()\n output_string = StringIO()\n device = TextConverter(rsrcmgr, output_string, laparams=LAParams())\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n interpreter.process_page(page)\n finder = re.search(re_ex, output_string.getvalue(), re.IGNORECASE)\n print('Searching table', '\\tCurrent page:', page_num)\n if finder:\n print('Table finded.')\n pages.append(page_num)\n break\n\n page_num += 1\n\n table = extract_text(path, pages)\n table = isolate(table)\n table = add_separations(table)\n\n return table", "def extract_pdf_tables(pdf_file):\n dfs = tabula.read_pdf(pdf_file, pages='all', multiple_tables=True)\n tables = []\n\n for df in dfs:\n row_k_to_items = {}\n for _, row_str_map in df.to_dict().items():\n for row_k, row_str_value in row_str_map.items():\n\n if row_k not in row_k_to_items:\n row_k_to_items[row_k] = []\n if isinstance(row_str_value, str):\n row_str_value = row_str_value.replace('-', '0')\n row_str_value = re.sub(r'[^0-9\\s]', '', row_str_value)\n row_str_value = re.sub(r'\\s+', ' ', row_str_value).strip()\n if row_str_value:\n row_k_to_items[row_k].append(row_str_value)\n elif isinstance(row_str_value, int):\n row_k_to_items[row_k].append(row_str_value)\n\n table = list(row_k_to_items.values())\n tables.append(table)\n return tables", "def exportTable(self):\n\t\tself.pdf = \tself.dir + \"/application.pdf\"\n\t\tpdf = pisa.CreatePDF(\n\t\t\tfile(self.html, \"r\" ),\n\t\t\tfile(self.pdf, \"wb\")\n\t\t\t)", "def parse_tables_from_pdf(self, pdf_filename: str) -> Dict[str, List[Table]]:\n pdf_tokens, pdf_images = self.pdf_extractor.load_tokens_and_image(\n pdf_filename, resize_image=True\n )\n\n return self.parse_tables_from_pdf_data(pdf_tokens, pdf_images)", "def import_tables(file, pages):\n tables = camelot.read_pdf(\n file, pages=pages,\n flavor='stream',\n )\n return tables", "def parse(self):\n dfs = tabula.read_pdf(self.path, pages='all')\n\n ret = []\n for df in dfs:\n ret.append(df.to_csv())\n\n return ret", "def extract_text(infile):\n # Get text from mudraw\n text = subprocess.check_output(['mudraw', '-F', 'txt', infile])\n\n # Cleanup raw text\n match = re.search(\n r'.*?Activity \\/ Remarks(?P<table1>.*?)Activities not shown on the ' +\n r'DABS Chart Side:.*?Activity \\/ Remarks(?P<table2>.*?)For detailed ' +\n r'information regarding the DABS',\n text,\n re.MULTILINE | re.DOTALL)\n if not match:\n raise ExtractionError('Could not extract text from PDF.')\n false_or_none_string = lambda x: bool(x) and x.lower() != 'none'\n data = '\\n\\n\\n'.join(match.groups())\n raw_parts = re.sub(r'\\n[ \\t]+\\n', '\\n\\n', data).split('\\n\\n\\n')\n parts = filter(false_or_none_string, map(lambda x: x.strip(), raw_parts))\n\n # Write CSV\n headers = (\n b'Firing-Nr\\nD-/R-Area\\nNOTAM-Nr',\n b'Validity UTC',\n b'Lower Limit\\nAMSL or FL',\n b'Upper Limit\\nAMSL or FL',\n b'Location',\n b'Center Point',\n b'Covering Radius',\n b'Activity / Remarks',\n )\n rows = []\n for i, part in enumerate(parts):\n # Regexes\n multiple_newlines_re = re.compile(r'\\n+')\n height_re = re.compile(r'(GND|[0-9]+m \\/ [0-9]+ft|FL[0-9]{2,3}|REF AIP)')\n center_radius_re = re.compile(r'([0-9]{6}N [0-9]{7}E)\\s+?(.*?NM)')\n\n # Separate columns (warning: hackish code ahead!)\n row = {}\n step1 = re.split(r'([0-2][0-9][0-6][0-9] - [0-2][0-9][0-6][0-9])', part)\n 
row['nr'] = step1[0].strip()\n timestring = '\\n'.join(step1[1:-1])\n row['validity'] = multiple_newlines_re.sub('\\n', timestring)\n step2 = filter(None, height_re.split(step1[-1].strip()))\n row['lower'] = step2[0]\n row['upper'] = step2[2]\n step3 = filter(None, center_radius_re.split(step2[-1].strip()))\n row['location'] = step3[0].strip()\n row['center'] = step3[1].strip()\n row['radius'] = step3[2].strip()\n row['activity'] = multiple_newlines_re.sub('\\n', step3[3].strip())\n\n # Add to list of rows\n rows.append((\n row['nr'].encode('utf8'),\n row['validity'].encode('utf8'),\n row['lower'].encode('utf8'),\n row['upper'].encode('utf8'),\n row['location'].encode('utf8'),\n row['center'].encode('utf8'),\n row['radius'].encode('utf8'),\n row['activity'].encode('utf8'),\n ))\n\n return tablib.Dataset(*rows, headers=headers)", "def mo_parse_pdf(self, filepath):\n\n text = textract.process(filepath, encoding='utf-8')\n text = text.decode('utf-8')\n\n if 'PRESSURE CALIBRATION DATA' in text:\n self.mo_parse_p(filepath)\n\n elif 'TEMPERATURE CALIBRATION DATA' or 'CONDUCTIVITY CALIBRATION DATA' in text:\n self.mo_parse_ts(text)\n\n else:\n pass", "def extract_table_3(first_line_idx, lineIN_list): \n DEBUG_3 = False\n if DEBUG_3: header = '>>>DEBUG_3:\\t'\n if DEBUG_3: print header, 'first_line_idx', first_line_idx \n if DEBUG_3: from pprint import pprint as pp\n my_lineOUT_list = []\n my_lineIN_list = []\n\n cell0_without_tab_list = [\n ]\n tab_within_cell_list = [\n ]\n\n for line_idx in range(first_line_idx+1, len(lineIN_list) ):\n this_line = lineIN_list[line_idx].rstrip('\\n') # strip EOL only, not other whtite space\n\n # Fix unwantted tab in original PDF file: replace unwanted tab into a space\n for tmp_str in tab_within_cell_list:\n this_line = this_line.replace('%s\\t'%(tmp_str), '%s '%(tmp_str), 1)\n\n if len(this_line) != 0:\n try:\n header_of_the_table \n except NameError:\n header_of_the_table = re.sub('\\t.*', '', this_line) # extract only the first cell\n my_lineIN_list.append(this_line)\n else:\n # The table itself is between 2 blank lines\n if len(my_lineIN_list) != 0:\n break\n\n # Make the text line compatible with CSV syntax\n line_idx = 0\n while line_idx < len(my_lineIN_list):\n this_line = my_lineIN_list[line_idx]\n this_delimiter_count = this_line.count('\\t')\n\n if DEBUG_3: print header, '%3d: 2000, this_line\\t(%s)'%(line_idx, this_line)\n\n if this_line.startswith(header_of_the_table):\n # header of the table\n delimiter_count = this_delimiter_count\n this_line = text_to_csv_syntax(this_line) # text line compatible with CSV syntax\n this_line = text_to_excessive_space_on_hyphen(this_line) # remove the space in 'xx- xx', or 'xx -xx'\n my_lineOUT_list.append(this_line)\n\n else:\n # Get one or more line until enough cells: \n while this_line.count('\\t') < delimiter_count:\n # append next line\n if line_idx+1 < len(my_lineIN_list):\n line_idx += 1\n this_line += ' ' + my_lineIN_list[line_idx]\n if DEBUG_3: print header, '%3d: 3000, this_line\\t(%s)'%(line_idx, this_line)\n else:\n break\n\n # Has enough cells: append one or more line if these line has no tab except \n # the line start with a specific text\n while line_idx+1 < len(my_lineIN_list) and not '\\t' in my_lineIN_list[line_idx+1]:\n if any (my_lineIN_list[line_idx+1].startswith(z) for z in cell0_without_tab_list):\n break\n else:\n line_idx += 1\n this_line += ' ' + my_lineIN_list[line_idx]\n if DEBUG_3: print header, '%3d: 4000, this_line\\t(%s)'%(line_idx, this_line)\n this_line = 
text_to_csv_syntax(this_line) # text line compatible with CSV syntax\n this_line = text_to_excessive_space_on_hyphen(this_line) # remove the space in 'xx- xx', or 'xx -xx'\n my_lineOUT_list.append(this_line)\n line_idx += 1\n\n if DEBUG_3:\n for str2 in my_lineOUT_list: print header, 'str2(%r)'%(str2)\n return my_lineOUT_list", "def extract_pdf(path):\n\n #only reading from pdf files\n\n text = textract.process(filename = path, encoding = \"ascii\")\n\n\n text.replace(\"\\n\", \" \")\n text.replace(\"\\t\", \" \")\n text.replace(\"\\r\", \" \")\n filter(lambda x: x in set(string.printable), text)\n\n return text", "def do_single_file_preprocess(pdf_file):", "def extract(key, path_pdf):\n\n path_tmp_pdf = extract_first_page(path_pdf)\n\n # extract all text from first page\n raw_text = extract_text(path_tmp_pdf)\n\n # extract abstract from whole page and replace hyphens etc.\n abstract = extract_abstract(raw_text)\n\n # something went wrong when abstract is longer than 1500 chars\n if len(abstract) > MAX_LEN:\n print('{}: Abstract is too long.'.format(path_pdf))\n\n if not abstract:\n print('{}: Could not extract abstract.'.format(path_pdf))\n\n # clean up temp file\n os.unlink(path_tmp_pdf)\n\n # TODO: Fix this return object\n out = {'@key': key, 'abstract': abstract}\n\n return out", "def pdf_to_txt(full_path):\n file = open(full_path,'rb')\n extracted_text = parser.from_buffer(file)\n return extracted_text['content']", "def extract_table_4(first_line_idx, lineIN_list): \n DEBUG_4 = False\n if DEBUG_4: header = '>>>DEBUG_4:\\t'\n if DEBUG_4: print header, 'first_line_idx', first_line_idx \n if DEBUG_4: from pprint import pprint as pp\n my_lineOUT_list = []\n my_lineIN_list = []\n\n cell0_without_tab_list = [\n ]\n tab_within_cell_list = [\n ]\n\n for line_idx in range(first_line_idx+1, len(lineIN_list) ):\n this_line = lineIN_list[line_idx].rstrip('\\n') # strip EOL only, not other whtite space\n\n # Fix unwantted tab in original PDF file: replace unwanted tab into a space\n for tmp_str in tab_within_cell_list:\n this_line = this_line.replace('%s\\t'%(tmp_str), '%s '%(tmp_str), 1)\n\n if len(this_line) != 0:\n try:\n header_of_the_table \n except NameError:\n header_of_the_table = re.sub('\\t.*', '', this_line) # extract only the first cell\n my_lineIN_list.append(this_line)\n if DEBUG_4: print header, '%3d: 1900, header_of_the_table\\t(%s)'%(line_idx, header_of_the_table)\n else:\n # The table itself is between 2 blank lines\n if len(my_lineIN_list) != 0:\n break\n\n # Make the text line compatible with CSV syntax\n line_idx = 0\n while line_idx < len(my_lineIN_list):\n this_line = my_lineIN_list[line_idx]\n this_delimiter_count = this_line.count('\\t')\n\n if DEBUG_4: print header, '%3d: 2000, this_line\\t(%s)'%(line_idx, this_line)\n\n if this_line.startswith(header_of_the_table):\n # header of the table\n delimiter_count = this_delimiter_count\n this_line = text_to_csv_syntax(this_line) # text line compatible with CSV syntax\n this_line = text_to_excessive_space_on_hyphen(this_line) # remove the space in 'xx- xx', or 'xx -xx'\n my_lineOUT_list.append(this_line)\n\n else:\n # Get one or more line until enough cells: \n while this_line.count('\\t') < delimiter_count:\n # append next line\n if line_idx+1 < len(my_lineIN_list):\n line_idx += 1\n this_line += ' ' + my_lineIN_list[line_idx]\n if DEBUG_4: print header, '%3d: 3000, this_line\\t(%s)'%(line_idx, this_line)\n else:\n break\n\n # Has enough cells: append one or more line if these line has no tab except \n # the line start with a 
specific text\n while line_idx+1 < len(my_lineIN_list) and not '\\t' in my_lineIN_list[line_idx+1]:\n if DEBUG_4: print header, '%3d: 3900, this_line\\t(%s)'%(line_idx, this_line)\n #if any (my_lineIN_list[line_idx+1].startswith(z) for z in cell0_without_tab_list):\n if len(cell0_without_tab_list) >0 and any (my_lineIN_list[line_idx+1].startswith(z) for z in cell0_without_tab_list):\n break\n else:\n line_idx += 1\n this_line += ' ' + my_lineIN_list[line_idx]\n if DEBUG_4: print header, '%3d: 4000, this_line\\t(%s)'%(line_idx, this_line)\n this_line = text_to_csv_syntax(this_line) # text line compatible with CSV syntax\n this_line = text_to_excessive_space_on_hyphen(this_line) # remove the space in 'xx- xx', or 'xx -xx'\n my_lineOUT_list.append(this_line)\n line_idx += 1\n\n if DEBUG_4:\n for str2 in my_lineOUT_list: print header, 'str2(%r)'%(str2)\n return my_lineOUT_list", "def read_pdf(exp_type='c'):\n # shape file path\n file_name = pdffile_exptype[exp_type]\n file_path_name = os.path.join('data', 'pdf', file_name)\n pdf_file = resource_filename(__name__, file_path_name)\n\n return h5py.File(pdf_file, 'r')", "def vat_return_reader(file_name):\r\n \r\n\r\n df = wrapper.read_pdf(file_name,pandas_options={'header':None},encoding=\"cp775\")\r\n print(\"tabula_read executed\")\r\n column_data = df.loc[0:45,2]\r\n column_name = df.loc[0:45,1]\r\n #replaces '.' with ',' in order to be treated as nummeric\r\n column_data = column_data.astype(str).str.replace(',','.')\r\n #converts str to float\r\n column_data = pd.to_numeric(column_data, errors='coerce')\r\n # renames row index as file names\r\n column_data = column_data.rename(index=f\"{file_name}\")\r\n return column_data", "def convert_pdf_into_csv(source_file, target_subdir):\n DEBUG = False\n if DEBUG: header = '>>>DEBUG:\\t'\n #if DEBUG: print header\n\n regexp_table = re.compile(r\"TABLE\\s+(\\d+\\w?)\\s\")\n fileIN = open( source_file, \"r\")\n lineIN_list = fileIN.readlines()\n\n # To create csv file for tables, we need to fix the lines within the table because\n # 1. the table cross page boundary that created many blank line(s)\n # 2. the text has embedded tab which should be removed\n # 3. 
others: see each \"fix_...\" for details\n lineIN_list = fix_lineIN_list_space_only(lineIN_list)\n lineIN_list = fix_lineIN_list_table_10(lineIN_list)\n lineIN_list = fix_lineIN_list_table_13(lineIN_list)\n lineIN_list = fix_lineIN_list_table_17(lineIN_list)\n lineIN_list = fix_lineIN_list_table_19(lineIN_list)\n lineIN_list = fix_lineIN_list_table_22(lineIN_list)\n lineIN_list = fix_lineIN_list_table_23(lineIN_list)\n lineIN_list = fix_lineIN_list_table_24(lineIN_list)\n lineIN_list = fix_lineIN_list_table_26(lineIN_list)\n lineIN_list = fix_lineIN_list_table_31(lineIN_list)\n lineIN_list = fix_lineIN_list_table_33(lineIN_list)\n lineIN_list = fix_lineIN_list_table_34(lineIN_list)\n lineIN_list = fix_lineIN_list_table_35(lineIN_list)\n lineIN_list = fix_lineIN_list_table_39(lineIN_list)\n\n csv_filename_list = []\n head, tail = os.path.split(source_file)\n file_name, file_extension = os.path.splitext(tail)\n\n # Every table\n #target_file_all_table = target_subdir.replace('.csv', '_all_table.csv')\n target_file_all_table = os.path.join(target_subdir, file_name + '_all_table.csv')\n print '>>>Creating', '({target_file_all_table})'.format(**locals())\n\n fileOUT_all_table = open( target_file_all_table, \"w+\")\n\n # Every byte table\n #target_file_byte_table = target_subdir.replace('.csv', '_byte_table.csv')\n target_file_byte_table = os.path.join(target_subdir, file_name + '_byte_table.csv')\n print '>>>Creating', '({target_file_byte_table})'.format(**locals())\n fileOUT_byte_table = open( target_file_byte_table, \"w+\")\n\n # memory map table\n #target_file_map_table = target_subdir.replace('.csv', '_map_table.csv')\n target_file_map_table = os.path.join(target_subdir, file_name + '_map_table.csv')\n print '>>>Creating', '({target_file_map_table})'.format(**locals())\n fileOUT_map_table = open( target_file_map_table, \"w+\")\n\n for line_idx in range( len(lineIN_list) ):\n #if DEBUG and 'TABLE' in lineIN_list[line_idx]:\n # print header, lineIN_list[line_idx].strip()\n if regexp_table.search(lineIN_list[line_idx]):\n table_nu = regexp_table.search(lineIN_list[line_idx]).group(1)\n #if DEBUG: print '>>>>>>>>>>>>>>table_nu =', table_nu\n #if DEBUG: print header, 'FOUND', table_nu, '\\t',lineIN_list[line_idx].strip()\n csv_filename_list.append(table_nu)\n\n #if table_nu == '19':\n # lineOUT_list = extract_table_byte_table(line_idx, lineIN_list)\n if table_nu == '1': lineOUT_list = extract_table_1(line_idx, lineIN_list)\n elif table_nu =='2': lineOUT_list = extract_table_2(line_idx, lineIN_list)\n elif table_nu =='3': lineOUT_list = extract_table_3(line_idx, lineIN_list)\n elif table_nu =='4': lineOUT_list = extract_table_4(line_idx, lineIN_list)\n elif any( table_nu == z for z in ('5', '6', '8', '9', '10', '11', '12', '13', '17', '18', '19', '22', '23', '24', '26', '27', '28', '29', '30', '32A', '31', '33', '34', '35', '39')): \n lineOUT_list = extract_table_byte_table(line_idx, lineIN_list)\n #elif table_nu =='7': lineOUT_list = extract_table_7(line_idx, lineIN_list)\n elif any( table_nu == z for z in ('14', '15', '16', '21', '25', '32', '32', '32', '32')): \n lineOUT_list = extract_table_byte_table(line_idx, lineIN_list)\n else:\n lineOUT_list = []\n\n DEBUG = False\n if DEBUG and len(lineOUT_list) != 0:\n print header, table_nu, 'lineOUT_list', lineOUT_list\n from pprint import pprint as pp\n pp(lineOUT_list)\n\n\n table_name = re.sub('^.*?TABLE ', 'TABLE ', lineIN_list[line_idx]) # Fix this line when it does not start with 'TABLE nn'\n\n if True:\n #target_file_single_table = 
target_subdir.replace('.csv', '_table_%s.csv'%(table_nu))\n target_file_single_table = os.path.join(target_subdir, file_name + '_table.csv')\n print '>>>Creating', '({target_file_single_table})'.format(**locals())\n\n fileOUT_single_table = open( target_file_single_table, \"w+\")\n fileOUT_single_table.write ('\\n'.join(lineOUT_list))\n fileOUT_single_table.close()\n\n #if len(lineOUT_list) != 0:\n if True:\n if table_nu != '1': fileOUT_all_table.write('\\n\\n')\n #try:\n # if not flag_all_table:\n # fileOUT_all_table.write('\\n\\n') # add lines between 2 tables\n #except NameError:\n # flag_all_table = True\n fileOUT_all_table.write('%s'%(table_name))\n fileOUT_all_table.write ('\\n'.join(lineOUT_list))\n\n if len(lineOUT_list)>0 and (re.search('^Byte,', lineOUT_list[0]) or re.search('^Address,Byte,', lineOUT_list[0])):\n #fileOUT_byte_table.write('\\n\\n')\n try:\n if flag_byte_table:\n fileOUT_byte_table.write('\\n\\n') # add lines between 2 tables\n #fileOUT_byte_table.write('\\n') # add lines between 2 tables\n except NameError:\n flag_byte_table = True\n fileOUT_byte_table.write('%s'%(table_name))\n\n # Fix table 3: remove first column about A0h\n if re.search('SINGLE BYTE', table_name):\n lineOUT_list = [re.sub('^Address.', '', this_line) for this_line in lineOUT_list]\n lineOUT_list = [re.sub('^A0h.', '', this_line) for this_line in lineOUT_list]\n\n fileOUT_byte_table.write ('\\n'.join(lineOUT_list))\n\n if len(lineOUT_list)>0 and re.search('MAP', table_name):\n try:\n if flag_map_table:\n fileOUT_map_table.write('\\n\\n') # add lines between 2 tables\n except NameError:\n flag_map_table = True\n fileOUT_map_table.write('%s'%(table_name))\n\n ## Fix table 3: remove first column about A0h\n #if re.search('SINGLE BYTE', table_name):\n # lineOUT_list = [re.sub('^Address.', '', this_line) for this_line in lineOUT_list]\n # lineOUT_list = [re.sub('^A0h.', '', this_line) for this_line in lineOUT_list]\n\n fileOUT_map_table.write ('\\n'.join(lineOUT_list))\n\n\n\n\n fileOUT_all_table.close()\n fileOUT_byte_table.close()\n fileOUT_map_table.close()\n\n return csv_filename_list", "def get_data_from_pdf(self, regex):\n match = re.search(regex, self.page_text)\n return match.group(0).replace(\" \", \"\").replace(\"\\n\", \"\")", "def find_tables(pdf_dict):\n \n table_dict = {}\n \n # matches numerical tables with no whitespace between entries\n table_matcher1= re.compile('\\S\\n[\\d\\W]')\n \n # matches tables with deliberate whitespaces between entries\n table_matcher2= re.compile('\\s\\n[\\d\\s]') \n \n \n i= 0\n for page_num, paragraphs in pdf_dict.copy().items():\n for paragraph_num, text in enumerate(paragraphs):\n \n # This if statement decides what should be interpreted\n # as a \"table string\" on the text.\n # Right now, it is set to identify as a table a string that\n # has more than 4 newline characters surrounded by non white space\n # characters or a string with at least three\n # newline spaces deliberately surrounded by white spaces\n # This 'sensitivity of tables' can be modified according\n # the need and aspect of documents parsed.\n \n if (len(table_matcher1.findall(text))>=4 or len(table_matcher2.findall(text))>=3):\n i+=1\n table_position_dict = {'page':page_num,\n 'paragraph': paragraph_num+1,\n 'raw_table_text':text}\n table_dict[i] = table_position_dict\n return table_dict", "def pdf_to_test(file_name):\n #Opening, reading and parsing a pdf file to string\n pdfFileObj = open(file_name, 'rb')\n pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n pdf_string = 
pdfReader.getPage(0).extractText()\n \n #Find the RechnungsNr.\n start_of_RN = pdf_string.find(\"No.Invoice Date\") + len(\"No.Invoice Date\")\n rechnungs_nr = pdf_string[start_of_RN:start_of_RN+7]\n \n #Find the address\n start_of_address = pdf_string.find(\"Invoice Address\") + len(\"Invoice Address\")\n end_of_address = pdf_string.find(\"Payment Terms:\")\n address = pdf_string[start_of_address:end_of_address]\n \n #Liefermonat commenrs\n start_of_contract = pdf_string.find(\"Company Name / Line of business\") + len(\"Company Name / Line of business\")\n end_of_contract = pdf_string.find(\"Summary of Charges\")\n contract = pdf_string[start_of_contract:end_of_contract]\n \n #Nettobetrag - read base charge\n start_of_netto = pdf_string.find(\"Base Charges\") + len(\"Base Charges\")\n end_of_netto = pdf_string.find(\"Click Charges - Color\")\n nettobetrag = pdf_string[start_of_netto:end_of_netto]\n \n pdfFileObj.close()\n \n return pdfFileObj.name, rechnungs_nr, address, contract, nettobetrag", "def extract_table_data(path):\n document = zipfile.ZipFile(path)\n xml_content = document.read('word/document.xml')\n document.close()\n tree = XML(xml_content)\n table = tree.find(BODY).find(TABLE)\n rows = table.findall(ROW)\n\n day = \"\"\n dates = \"\"\n teachers = {}\n\n for row in rows[1:]:\n cols = row.findall(COL)\n\n #Code\n code = extract_text(cols[0])\n\n #course\n course = extract_text(cols[1])\n\n #day\n dayTemp = extract_text(cols[2])\n if dayTemp != \"\":\n day = dayTemp\n\n #Calendar/Date\n temp_date = extract_date(cols[3])\n if len(temp_date) != 0:\n dates = temp_date\n\n #hour\n times = extract_time(cols[4])\n\n #Teachers\n teacher = extract_text(cols[5])\n\n if teacher == \"\":\n continue\n\n if teacher not in teachers:\n teachers[teacher] = Teacher(teacher)\n\n segment = Segment(code, course, day, dates, times)\n\n teachers[teacher].add_segment(segment)\n\n return teachers", "def read_pdf(\n pdf_file: UploadFile = File(...),\n settings: config.Settings = Depends(get_settings),\n db: Session = Depends(get_db),\n authorization: str = Header(None),\n):\n if authorization != settings.upload_secret:\n raise HTTPException(401, \"Operação inválida!\")\n\n file = pdf_file.file\n content = file.read()\n\n # Builds the path\n target_path = Path(settings.pdf_storage_path)\n filename = target_path.joinpath(pdf_file.filename)\n save_pdf(content, filename)\n\n db_results = read_results(db, PDF_Filename=pdf_file.filename)\n\n if db_results:\n db_result = db_results[0]\n user, password = create_patient_user(\n db,\n cpf=db_result.CPF,\n name=f\"{db_result.prMotherFirstname} {db_result.prMotherSurname}\",\n )\n\n sms_message = f\"{user.name}, o resultado do exame do pézinho está pronto. \"\n\n if password:\n sms_message += f\"Faça login com seu cpf e a senha {password}\"\n\n number = db_result.ptnPhone1 or db_result.ptnPhone2\n\n if number:\n sms_utils.send_sms(number, sms_message)\n else:\n log(\n f\"[PDF] Arquivo {pdf_file.filename} importado mas sem \"\n \"celulares associados. SMS não será enviado.\"\n )\n else:\n log(\n f\"[PDF] Arquivo {pdf_file.filename} importado mas sem \"\n \"resultado associado. 
SMS não será enviado.\"\n )\n\n log(\"[PDF] PDF foi importado.\", db)\n\n return PDFProcessed(\n length=len(content), filename=pdf_file.filename, sha256=sha256(filename)\n )", "def parsepdf(intext): # type: (str) -> str\n\n pdfbinarydata = base64.b64decode(intext.strip())\n pdfFileObj = io.BytesIO()\n pdfFileObj.write(pdfbinarydata)\n pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n extractedText = ''\n for i in range(0, pdfReader.numPages):\n pageObj = pdfReader.getPage(i)\n extractedText = extractedText + pageObj.extractText()\n\n return extractedText.strip()", "def extract_table_2(first_line_idx, lineIN_list): \n DEBUG_2 = False\n if DEBUG_2: header = '>>>DEBUG_2:\\t'\n if DEBUG_2: print header, 'first_line_idx', first_line_idx \n if DEBUG_2: from pprint import pprint as pp\n my_lineOUT_list = []\n my_lineIN_list = []\n\n #OBS header_of_the_table = 'Parameter'\n cell0_without_tab_list = [\n 'Endurance',\n ]\n tab_within_cell_list = [\n ]\n\n for line_idx in range(first_line_idx+1, len(lineIN_list) ):\n\n this_line = lineIN_list[line_idx].rstrip('\\n') # strip EOL only, not other whtite space\n\n #TMP if DEBUG_2: print\n #TMP if DEBUG_2: print header, '%3d: 1000, this_line\\t(%s)'%(line_idx, this_line)\n #TMP if DEBUG_2: print header, '%3d: len(this_line)\\t(%s)'%(line_idx, len(this_line))\n #TMP if DEBUG_2: print header, '%3d: len(my_lineIN_list)\\t(%s)'%(line_idx, len(my_lineIN_list))\n\n # Fix unwantted tab in original PDF file: replace unwanted tab into a space\n for tmp_str in tab_within_cell_list:\n this_line = this_line.replace('%s\\t'%(tmp_str), '%s '%(tmp_str), 1)\n\n if len(this_line) != 0:\n try:\n header_of_the_table \n except NameError:\n header_of_the_table = re.sub('\\t.*', '', this_line) # extract only the first cell\n my_lineIN_list.append(this_line)\n else:\n # The table itself is between 2 blank lines\n if len(my_lineIN_list) != 0:\n break \n\n # Make the text line compatible with CSV syntax\n line_idx = 0\n while line_idx < len(my_lineIN_list):\n this_line = my_lineIN_list[line_idx]\n this_delimiter_count = this_line.count('\\t')\n\n if DEBUG_2: print header, '%3d: 2000, this_line\\t(%s)'%(line_idx, this_line)\n\n if this_line.startswith(header_of_the_table):\n # header of the table\n delimiter_count = this_delimiter_count\n this_line = text_to_csv_syntax(this_line) # text line compatible with CSV syntax\n this_line = text_to_excessive_space_on_hyphen(this_line) # remove the space in 'xx- xx', or 'xx -xx'\n my_lineOUT_list.append(this_line)\n\n else:\n # Get one or more line until enough cells: \n\n while this_line.count('\\t') < delimiter_count:\n # append next line\n if line_idx+1 < len(my_lineIN_list):\n line_idx += 1\n this_line += ' ' + my_lineIN_list[line_idx]\n if DEBUG_2: print header, '%3d: 3000, this_line\\t(%s)'%(line_idx, this_line)\n else:\n break\n\n # Has enough cells: append one or more line if these line has no tab except \n # the line start with a specific text\n while line_idx+1 < len(my_lineIN_list) and not '\\t' in my_lineIN_list[line_idx+1]:\n if any (my_lineIN_list[line_idx+1].startswith(z) for z in cell0_without_tab_list):\n break\n else:\n line_idx += 1\n this_line += ' ' + my_lineIN_list[line_idx]\n if DEBUG_2: print header, '%3d: 4000, this_line\\t(%s)'%(line_idx, this_line)\n this_line = text_to_csv_syntax(this_line) # text line compatible with CSV syntax\n this_line = text_to_excessive_space_on_hyphen(this_line) # remove the space in 'xx- xx', or 'xx -xx'\n my_lineOUT_list.append(this_line)\n line_idx += 1\n\n #if DEBUG_2: 
pp(my_lineOUT_list)\n if DEBUG_2: \n for str2 in my_lineOUT_list: print header, 'str2(%r)'%(str2)\n return my_lineOUT_list", "def parse_pdfs():\n # get all of the pdf files in the dir\n pahopdffiles = [f for f in listdir(paho_raw_reports_dir) if isfile(join(paho_raw_reports_dir, f))]\n # set up a list to hold the data for all pdf files\n all_pdf_data = []\n # read in each pdf file\n for pahopdffile in pahopdffiles:\n try:\n logging.info(\"Now attempting to read in: \"+pahopdffile)\n fullfilepath = os.path.join(paho_raw_reports_dir, pahopdffile)\n tables = camelot.read_pdf(fullfilepath)\n # get the pandas dataframe from each pdf\n pdfdataframe = tables[0].df\n # ensure that this is a valid PAHO COVID19 report\n report_keywords = ['Cumulative','COVID-19','Americas'] \n if not all(x in pdfdataframe[0].iloc[0] for x in report_keywords):\n logging.error(pahopdffile+\" was not recognised as a normal PAHO pdf file. Skipping.\")\n continue\n # set up the list to hold the data for this file\n reportdata = []\n # create a variable to store the date of this report\n date = None\n # create a variable to store the last subregion seen\n subregion = None\n # PAHO has different formats for their tables, so we need to check the number of columns in the pdf\n numcolumns = len(pdfdataframe.columns)\n # get the row index for the last country\n lastcountryrowindex = pdfdataframe[1][pdfdataframe[1] == 'Total'].index[0]-1\n for rowindex,rowdata in pdfdataframe.iterrows():\n # set up variables to hold the data for the dict\n country_or_territory_name = None\n confirmed_cases = None\n probable_cases = None\n probable_deaths = None\n recovered = None\n percentage_increase_confirmed = None\n if numcolumns == 6:\n # this is the old format that they started with\n if rowindex == 0:\n # this row contains the date for this report\n rawdate = rowdata[0].replace('Cumulative suspected and confirmed COVID-19 cases reported by \\ncountries and territories in the Americas, as of ','')\n date = datetime.strptime(rawdate,\"%d %B %Y\")\n if not date:\n raise RuntimeError(\"Unable to determine the date of this report. Row 0 contained this data: \"+\n rowdata[0])\n elif rowindex in range(4,lastcountryrowindex+2):\n # all these rows contain data for countries/regions\n # so parse the useful data for each\n # some of these rows contain subtotals per region/territory\n if rowdata[0] != '':\n # store the name of the last seen subregion\n subregion = rowdata[0]\n if rowdata[1] == \"Subtotal\":\n # on the subtotal rows, store the name for the entire subregion\n country_or_territory_name = subregion\n elif rowdata[1] == \"Total\":\n # on the last row, store the name All Americas to represent the total\n country_or_territory_name = \"All Americas\"\n else:\n # else store the name for the specific country\n country_name = rowdata[1]\n # note that country names may also have special characters\n country_name = re.sub('[^A-Za-z0-9,()\\[\\] ]+', '', country_name)\n country_or_territory_name = country_name\n # for each of the other columns, check if empty, else store the data present in the cell\n if rowdata[2] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_cases = None\n else:\n # remove the comma and parse to an int\n confirmed_cases = int(rowdata[2].replace(\",\",\"\"))\n if rowdata[3] == \"\":\n # none is used to replace NULL in the db. 
This represents an unknown quantity\n probable_cases = None\n else:\n # remove the comma and parse to an int\n probable_cases = int(rowdata[3].replace(\",\",\"\"))\n if rowdata[4] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_deaths = None\n else:\n # remove the comma and parse to an int\n confirmed_deaths = int(rowdata[4].replace(\",\",\"\"))\n if rowdata[5] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n transmission_type = None\n else:\n # store this string\n transmission_type = rowdata[5]\n # store null data for all other fields that were not present in the old reports\n probable_deaths = None\n recovered = None\n percentage_increase_confirmed = None\n elif numcolumns == 9:\n # PAHO added in probable cases\n if rowindex == 0:\n # this row contains the date for this report\n rawdate = rowdata[0].split(\", as of \")[1]\n if \"\\n\" in rawdate:\n rawdate = rawdate.split(\"\\n\")[0]\n try:\n date = datetime.strptime(rawdate,\"%d %B %Y\")\n except ValueError:\n logging.error(\"Unable to determine the date of this report. Row 0 contained this data: \"+\n rowdata[0])\n raise\n elif rowindex in range(4,lastcountryrowindex+2):\n # all these rows contain data for countries/regions\n # so parse the useful data for each\n # some of these rows contain subtotals per region/territory\n if rowdata[0] != '':\n # store the name of the last seen subregion\n subregion = rowdata[0]\n if rowdata[1] == \"Subtotal\":\n # on the subtotal rows, store the name for the entire subregion\n country_or_territory_name = subregion\n elif rowdata[1] == \"Total\":\n # on the last row, store the name All Americas to represent the total\n country_or_territory_name = \"All Americas\"\n else:\n # else store the name for the specific country\n country_name = rowdata[1]\n # note that country names may also have special characters\n country_name = re.sub('[^A-Za-z0-9,()\\[\\] ]+', '', country_name)\n country_or_territory_name = country_name\n # for each of the other columns, check if empty, else store the data present in the cell\n if rowdata[2] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_cases = None\n else:\n # there is a report where this column was merged for some reason\n if \"\\n\" in rowdata[2]:\n split_numbers = rowdata[2].split(\"\\n\")\n confirmed_cases = int(split_numbers[0].replace(\",\",\"\"))\n probable_cases = int(split_numbers[1].replace(\",\",\"\"))\n confirmed_deaths = int(split_numbers[2].replace(\",\",\"\"))\n probable_deaths = int(split_numbers[3].replace(\",\",\"\"))\n recovered = None\n percentage_increase_confirmed = float(rowdata[7].replace(\"%\",\"\"))\n transmission_type = rowdata[8]\n # continue with the next row for this broken report\n continue\n else:\n # remove the comma and parse to an int\n confirmed_cases = int(rowdata[2].replace(\",\",\"\"))\n if rowdata[3] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_cases = None\n else:\n # remove the comma and parse to an int\n probable_cases = int(rowdata[3].replace(\",\",\"\"))\n if rowdata[4] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_deaths = None\n else:\n # remove the comma and parse to an int\n confirmed_deaths = int(rowdata[4].replace(\",\",\"\"))\n if rowdata[5] == \"\":\n # none is used to replace NULL in the db. 
This represents an unknown quantity\n probable_deaths = None\n else:\n # store this string\n probable_deaths = rowdata[5]\n if rowdata[6] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n recovered = None\n else:\n # store this string\n recovered = int(rowdata[6].replace(\",\",\"\"))\n if rowdata[7] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n percentage_increase_confirmed = None\n else:\n # store this string\n percentage_increase_confirmed = float(rowdata[7].replace(\"%\",\"\"))\n if rowdata[8] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n transmission_type = None\n else:\n # store this string\n transmission_type = rowdata[8]\n elif numcolumns == 10:\n # PAHO added in country ISO codes and special characters\n if rowindex == 0:\n # this row contains the date for this report\n rawdate = rowdata[0].split(\", as of \")[1]\n if \"\\n\" in rawdate:\n rawdate = rawdate.split(\"\\n\")[0]\n try:\n date = datetime.strptime(rawdate,\"%d %B %Y\")\n except ValueError:\n logging.error(\"Unable to determine the date of this report. Row 0 contained this data: \"+\n rowdata[0])\n raise\n elif rowindex in range(3,lastcountryrowindex+2):\n # all these rows contain data for countries/regions\n # so parse the useful data for each\n # some of these rows contain subtotals per region/territory\n if rowdata[0] != '':\n # store the name of the last seen subregion\n subregion = rowdata[0]\n if rowdata[2] == \"Subtotal\":\n # on the subtotal rows, store the name for the entire subregion\n country_or_territory_name = subregion\n elif rowdata[2] == \"Total\":\n # on the last row, store the name All Americas to represent the total\n country_or_territory_name = \"All Americas\"\n else:\n # else store the name for the specific country\n country_name = rowdata[2]\n # note that country names may also have special characters\n country_name = re.sub('[^A-Za-z0-9,()\\[\\] ]+', '', country_name)\n country_or_territory_name = country_name\n # for each of the other columns, check if empty, else store the data present in the cell\n if rowdata[3] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_cases = None\n else:\n # there is a report where this column was merged for some reason\n if \"\\n\" in rowdata[3]:\n split_numbers = rowdata[3].split(\"\\n\")\n confirmed_cases = int(split_numbers[0].replace(\",\",\"\"))\n probable_cases = int(split_numbers[1].replace(\",\",\"\"))\n confirmed_deaths = int(split_numbers[2].replace(\",\",\"\"))\n probable_deaths = int(split_numbers[3].replace(\",\",\"\"))\n recovered = None\n percentage_increase_confirmed = float(rowdata[8].replace(\"%\",\"\"))\n transmission_type = rowdata[9]\n # continue with the next row for this broken report\n continue\n else:\n # remove the comma and parse to an int\n confirmed_cases = int(rowdata[3].replace(\",\",\"\"))\n if rowdata[4] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_cases = None\n else:\n # remove the comma and parse to an int\n probable_cases = int(rowdata[4].replace(\",\",\"\"))\n if rowdata[5] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_deaths = None\n else:\n # remove the comma and parse to an int\n confirmed_deaths = int(rowdata[5].replace(\",\",\"\"))\n if rowdata[6] == \"\":\n # none is used to replace NULL in the db. 
This represents an unknown quantity\n probable_deaths = None\n else:\n # store this string\n probable_deaths = rowdata[6]\n if rowdata[7] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n recovered = None\n else:\n # store this string\n recovered = int(rowdata[7].replace(\",\",\"\"))\n if rowdata[8] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n percentage_increase_confirmed = None\n else:\n # store this string\n percentage_increase_confirmed = float(rowdata[8].replace(\"%\",\"\"))\n if rowdata[9] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n transmission_type = None\n else:\n # store this string\n transmission_type = rowdata[9]\n else:\n logging.error(\"Unrecognised number of columns in the pdf file. Skipping for now.\"+\n \"Check if the report format changed from PAHO.\")\n # if we were at least able to scrape the country or territory name, create a dict and add it to the list\n if country_or_territory_name is not None:\n # set up the dict to store each row of data\n reportdict = collections.OrderedDict()\n # add the values to the dict in the order that we want for the report\n reportdict['date'] = date\n reportdict['country_or_territory_name'] = country_or_territory_name\n reportdict['confirmed_cases'] = confirmed_cases\n reportdict['probable_cases'] = probable_cases\n reportdict['confirmed_deaths'] = confirmed_deaths\n reportdict['probable_deaths'] = probable_deaths\n reportdict['recovered'] = recovered\n reportdict['percentage_increase_confirmed'] = percentage_increase_confirmed\n reportdict['transmission_type'] = transmission_type\n # now add this dict to our list for this report/pdf\n reportdata.append(reportdict)\n # once we are done adding all data for this pdf, add this pdf report to the list of all reports\n # if the reportdata list is not empty\n if reportdata:\n all_pdf_data.append(reportdata)\n logging.info(\"Successfully parsed \"+pahopdffile)\n except Exception as exc:\n logging.exception(\"Problem found while parsing \"+pahopdffile)\n raise\n logging.info(\"Completed parsing all pdfs in folder.\")\n return all_pdf_data", "def process_pdf(filename, qualies_only=False):\n if filename.endswith('.txt'):\n f = open(filename)\n text = f.read()\n f.close()\n else:\n text = subprocess.check_output([\"pdftotext\", \"-layout\",\n filename, \"-\"]).decode('utf-8')\n\n print(\"Processing {}...\".format(filename))\n\n pages = text.split(chr(12))\n print (\"{} Pages\".format(len(pages)))\n md = []\n qd = []\n for p in pages:\n if ('MAIN DRAW SINGLES' in p or 'Singles Championship' in p\n or 'Ladies\\' Singles' in p):\n md += [p]\n elif ('QUALIFYING SINGLES' in p or 'Qualifying Singles' in p\n or 'Qualifying Ladies\\' Singles' in p):\n qd += [p]\n elif ('Qualifiers' in p and not 'Doubles' in p):\n qd += [p]\n\n md_result = None\n qd_result = None\n\n meta = None\n if md and not qualies_only:\n md_result = drawsheet_process(chr(12).join(md))\n meta = md_result[2]\n\n # copy the metadata to the quaily draw if possible\n if qd:\n qd_result = drawsheet_process(chr(12).join(qd), meta, True)\n\n return (md_result, qd_result)", "def pdf_to_text(file_object):\n pdfData = file_object.read()\n tf = tempfile.NamedTemporaryFile()\n tf.write(pdfData)\n tf.seek(0)\n outputTf = tempfile.NamedTemporaryFile()\n\n if len(pdfData) > 0:\n out, err = subprocess.Popen([\"pdftotext\", \"-layout\", tf.name, outputTf.name ]).communicate()\n return outputTf.read()\n else:\n return 
None", "def pdf_miner_extract(pdf_file, password='', pages=0):\n pdf_resource_manager = PDFResourceManager()\n output_stream = StringIO()\n device = TextConverter(pdf_resource_manager, output_stream,\n laparams=LAParams(char_margin=0.8, detect_vertical=False))\n file_stream = open(pdf_file, 'rb')\n interpreter = PDFPageInterpreter(pdf_resource_manager, device)\n pages_set = []\n for page in PDFPage.get_pages(file_stream, set(), pages, password):\n interpreter.process_page(page)\n pages_set.append(output_stream.getvalue())\n output_stream.truncate(0)\n file_stream.close()\n device.close()\n output_stream.close()\n return pages_set", "def extract_text_from_pdf(file):\n\n return RegexpTokenizer(r'\\w+').tokenize(parser.from_file(file)['content'])", "def pdfProcessing():\n global DATABASE\n conn = db.create_connection(DATABASE)\n DOCUMENT_ORIGIN_CODE = \"DOSSIER_PATIENT\"\n\n pathFolder = \"fichiers source/\"\n extension = \".pdf\"\n pdfFileArrayPath = glob.glob(pathFolder + \"*\" + extension)\n print(\" - Processing pdf\", end=\"\")\n for file in pdfFileArrayPath:\n text = readFile.readPdfFile(file)\n query = getDocumentQuery(text, DOCUMENT_ORIGIN_CODE, file, pathFolder, extension)\n \n db.insert_document(conn, query)\n print(\".\", end = '')\n #commit the changes to db\n conn.commit()\n #close the connection\n conn.close()\n print(\"\\n\")", "def scrape_pdfs(db):\n process = CrawlerProcess()\n process.crawl(PdfSpider, db=db)\n process.start()", "def load_pdf(self, env=\"default\", debug=()):\n os.makedirs(\"txt\", exist_ok=True)\n if env is \"default\": # default python path\n call([executable,\n os.path.join(f\"{exec_prefix}\", \"Scripts\", \"pdf2txt.py\"),\n os.path.join(\"pdf\", f\"{self.pdf_filename}\"),\n os.path.join(f\"-otxt\", f\"{self.txt_filename}\")])\n if env is \"venv\": # virtual environment\n call([os.path.join(\"venv\", \"Scripts\", \"python.exe\"),\n os.path.join(\"venv\", \"Scripts\", \"pdf2txt.py\"),\n os.path.join(\"pdf\", f\"{self.pdf_filename}\"),\n os.path.join(f\"-otxt\", f\"{self.txt_filename}\")])\n with open(os.path.join(\"txt\", f\"{self.txt_filename}\"), \"r\", encoding=\"utf-8\") as file:\n self.paragraphs = [paragraph.rstrip('\\n') for paragraph in file]\n os.remove(os.path.join(\"txt\", f\"{self.txt_filename}\"))\n if debug:\n for counter, paragraph in enumerate(self.paragraphs):\n try:\n if int(debug[0]) < counter < int(debug[1]):\n print(counter, paragraph)\n except TypeError:\n print(\"Debug must be a (x,y) touple.\")", "def transform(self):\n count=1\n assert len(self.list_folder)>=1 ,\"FILES NOT FOUND\"\n for i,folder in enumerate(self.list_folder):\n path=folder\n for j,pdf in enumerate(os.listdir(path)):\n if pdf!= '.DS_Store':\n self.df.loc[count] = [pdf,folder.split('/')[-2], i+1,None,None]\n \n \"\"\" 0- Read Pdf file \"\"\"\n raw = parser.from_file(os.path.join(path,pdf))\n s = raw['content']\n \n \"\"\" 1- Handle linebreaks to optimize TextBlob.sentences results\"\"\"\n s=self.treat_new_line(s)\n \n \"\"\" 2- Divide text by sentences using TextBlob\"\"\"\n blob=TextBlob(s)\n paragraphs = np.array([str(s) for s in blob.sentences],dtype=str)\n self.parser = []\n self.parser_raw=[]\n p=self.text_processor_pdf(paragraphs)\n \n \"\"\"\n 3- Get rid of bad text data:\n Discard sentences with too long word (16 is the 99% quantile in english)\n Discard sentences with too much upper words (CREDENTIALS, Link, TITLE ..)\n \"\"\"\n index_=[i for i,c in enumerate(self.parser) if (True in [len(w)>=16 for w in c.split()] )]\n index_raw=[i for i,c in 
enumerate(self.parser_raw) if np.sum([w==w.upper() for w in c.split()])>=4]\n index=list(set(index_ + index_raw))\n self.df.loc[count,'paragraphs']=np.delete(np.array(self.parser),index)\n self.df.loc[count,'raw paragraphs']=np.delete(np.array(self.parser_raw),index)\n count+=1\n \n print(\"files from {} succesfully converted \".format(folder))\n \n return self.df", "def get_text_from_pdf(self, path):\n os.system(\"pdftotext {} tmp.txt > /dev/null\".format(path))\n with open('tmp.txt') as f:\n self.text = f.read()\n os.remove('tmp.txt')", "def create_pdf(self):\n\n my_datetime = datetime.now()\n self.pdf_name = (\n self.pdf_name + \"_\" + my_datetime.strftime(\"%H%M_%d%m%Y\") + \".pdf\"\n )\n fig_width = aW * self.column_ratio[0]\n\n clm_width_meta = (aW * self.column_ratio[1]) / len(self.fields)\n\n c = canvas.Canvas(os.path.join(self.pdf_folder, self.pdf_name), pagesize=A4)\n\n for qc_run_id, fig_file in sorted(self._files.items()):\n (param_values, feature_values) = get_param_values(\n qc_run_id, self.db_name, return_meta_add_on=True\n )\n\n comment = self.subject + \"<br/>\"\n # c.saveState()\n title = \"Dataset \" + qc_run_id\n\n # Prepare header\n header = Paragraph(title, title_style)\n h_w, h_h = header.wrap(aW, aH)\n\n # Prepare image\n img = ImageReader(fig_file)\n im_width, im_height = img.getSize()\n aspect = im_height / float(im_width)\n fig_height = fig_width * aspect\n\n # Prepare metadata section\n\n meta_table = Table(\n param_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n meta_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n\n meta_width, meta_height = meta_table.wrap(aW - im_width, aH / 2)\n\n # Prepare comments header\n comments_header = Paragraph(\"Comments:\", title_style)\n avail_height = aH - fig_height - v_padding\n comm_h_width, comm_h_height = comments_header.wrap(\n im_width, avail_height # aW - meta_width,\n )\n # Prepare comments\n my_datetime = datetime.now()\n ts = \"Printed on \" + my_datetime.strftime(\"%c\")\n\n try:\n data_specific_comment = self.comments[int(qc_run_id)]\n comment += data_specific_comment + \"<br/>\"\n comment += self.comments[\"general\"] + \"<br/>\"\n\n comment += self.smalltalk + \"<br/>\"\n except Exception:\n logger.warning(\n \"Unable to summarize result of \" + \"dataset {}\".format(qc_run_id)\n )\n comment_ts = comment + ts\n comment_ts = textwrap.fill(comment_ts, 70)\n comment_ts = comment_ts.replace(\"\\n\", \"<br/>\")\n\n comments_p = Paragraph(comment_ts, body_style)\n\n avail_height = aH - fig_height - v_padding - comm_h_height\n\n comm_width, comm_height = comments_p.wrap(im_width, avail_height) # aW,\n\n line_widths = comments_p.getActualLineWidths0()\n number_of_lines = len(line_widths)\n if number_of_lines > 1:\n pass\n if number_of_lines == 1:\n min(line_widths)\n comm_width, comm_height = comments_p.wrap(im_width, avail_height)\n\n # Prepare features\n feat_table = Table(\n feature_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n 
feat_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n avail_height = aH - meta_height # fig_height - v_padding - comm_h_height\n avail_height -= comm_height\n feat_width, feat_height = feat_table.wrap(aW - im_width, avail_height)\n\n # Draw everyting on canvas\n\n header.drawOn(c, left_margin, aH - top_margin)\n\n c.drawImage(\n img,\n left_margin,\n aH - top_margin - fig_height - v_padding,\n width=fig_width * 1.1,\n height=fig_height * 1.1,\n mask=\"auto\",\n )\n\n meta_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2, # - v_padding\n )\n\n comments_header.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - fig_height\n - 2 * v_padding, # - add_on_height\n )\n\n comments_p.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - comm_height\n - fig_height\n - 2 * v_padding\n - comm_h_height, # - add_on_height\n )\n\n feat_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2 - feat_height - v_padding,\n # top_margin - fig_height - 2*v_padding - feat_height\n )\n\n # new page\n c.showPage()\n c.saveState()\n\n c.save()", "def get_data_from_table(page):\r\n soup = BeautifulSoup(page.text, 'lxml')\r\n table = soup.find('table', id='filaSel')\r\n for row in table.find_all('tr'):\r\n data_list = []\r\n for column in row.find_all('td'):\r\n data_list.append(column.get_text().strip())\r\n write_to_file(data_list, 'a', '\\t')", "def extract_text(path, pages):\n out = []\n with open(path, 'rb') as file:\n pdftotext_string = pdftotext.PDF(file)\n\n for i in pages:\n out.append(pdftotext_string[i - 1])\n\n return out", "def readFile(self):\n with pdfplumber.open(self.path) as pdf:\n first_page = pdf.pages[0]\n text = first_page.extract_text()\n text = text.split('\\n')\n return processText(text)", "def displayPdfData(self, fileObj):\n # Enable the menus\n self.changeEnableMenus(fileObj)\n\n # Display the metadata\n self.displayMetadata(fileObj.allMetadata)\n\n # Clear the text field\n self.personalDataList.clear()\n\n # load all pages\n logging.debug(\"Loading \" + str(len(fileObj.personalData.pdata.pdfImgs)) + \" pages\") \n i = 1\n for pageImg in fileObj.personalData.pdata.pdfImgs:\n self.loadImageToPersonalData(fileObj.filePath + QtCore.QString(i), pageImg)\n i += 1", "def generate_data():\n for subdir, dirs, files in os.walk(legend_images_dir):\n for _file in files:\n getTables(_file)\n\n file_list = []\n for subdir, dirs, files in os.walk(pdf_output_dir):\n for _file in files:\n if _file.endswith('.pdf'):\n file_list.append(_file)\n\n print (\"Writing merged output in Output.pdf...\")\n current_dir = os.getcwd()\n mergeOutput(file_list, current_dir + \"/Output.pdf\")\n\n clean()", "def read_pdf_file(file):\n return pdftotext.PDF(file)", "def from_pdf(path):\n raw_regexes = [\n r\"\"\"<prism:doi>(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)</prism:doi>\"\"\",\n r\"\"\"[\"'](?:doi|DOI):(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)[\"']\"\"\",\n r\"\"\"URI\\s*\\(https?://doi.org/(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\\s*>\"\"\",\n 
r\"\"\"URI\\s*\\((?:https?://)?www.nature.com/doifinder/(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\\s*>\"\"\",\n # This one works for some ACIE papers, but is too risky. It matches\n # against DOIs of cited papers too. Better to use WPS-ARTICLEDOI.\n # r\"\"\"/URI\\(https?://(?:dx)?.doi.org/(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\"\"\",\n r\"\"\"/WPS-ARTICLEDOI\\s*\\((10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\"\"\",\n r\"\"\"\\((?:doi|DOI):\\s*(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\"\"\",\n r\"\"\"<rdf:li.+>(?:doi|DOI):(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)</rdf:li>\"\"\",\n ]\n regexes = [re.compile(regex) for regex in raw_regexes]\n class _DOIFound(Exception):\n pass\n\n p = Path(path)\n if not (p.exists() or p.is_file()):\n return _error(f\"from_pdf: invalid path '{p}' given\")\n\n strings = subprocess.Popen([\"strings\", p], stdout=subprocess.PIPE)\n grep = subprocess.Popen([\"grep\", \"-i\", \"doi\"], stdin=strings.stdout, stdout=subprocess.PIPE)\n try:\n for line in grep.stdout:\n line = line.decode(_g.gpe).strip()\n for regex in regexes:\n match = regex.search(line)\n if match:\n raise _DOIFound(match.group(1))\n except _DOIFound as e:\n doi = e.args[0]\n # Prune away any extra parentheses at the end.\n nopen = doi.count('(')\n nclose = doi.count(')')\n if nopen != nclose:\n doi = doi.rsplit(')', maxsplit=(nclose - nopen))[0]\n # Report success.\n return DOI(doi)\n else:\n return _error(f\"from_pdf: could not find DOI from '{p}'\")", "def extract_and_process(input_dir, pdf_path, json_output):\n\tprint('Extracting text from: ', pdf_path)\n\toutput_dir = input_dir + \"/output\"\n\ttry:\n\t\t# Extract PDF to HTML format\n\t\textracted_text = txt_ext.extract_pdf_to_html(pdf_path)\n\t\t# Write raw HTML\n\t\t#pre_proc.create_text_file(output_dir + \"/raw_\" + path_leaf(pdf_path) + \".html\", extracted_text)\n\t\t\n\t\tprint(\"Extraction finished: \"+ pdf_path + \", starting processing\")\n\t\tprocess(extracted_text, output_dir, path_leaf(pdf_path), json_output)\n\n\texcept PDFSyntaxError:\n\t\tprint(\"PDFSyntaxError: Is this really a PDF? 
\", pdf_path)\n\texcept PDFTextExtractionNotAllowed as e:\n\t\tprint(e)", "def buildPDF(self):\n\n # TODO: get this working\n # TODO: make this configurable via a dialog\n os.chdir(self.file_path.parent)\n proc = subprocess.Popen(\n [\"make\", \"latexpdf\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n proc.wait()\n for line in proc.stdout:\n print(\"stdout: \" + line.rstrip())", "def main():\n f_name = sys.argv[1]\n file_contents = open(f_name).read()\n C = CAST([], \"python\")\n C2 = C.from_json_str(file_contents)\n\n V = CASTToAGraphVisitor(C2)\n last_slash_idx = f_name.rfind(\"/\")\n file_ending_idx = f_name.rfind(\".\")\n pdf_file_name = f\"{f_name[last_slash_idx + 1 : file_ending_idx]}.pdf\"\n V.to_pdf(pdf_file_name)", "def test_malformed_pdf(self):\n paper = factories.Paper.create(document=factory.django.FileField(\n data=b\"\"))\n paper_url = \"{}/{}\".format(EXTRACT_URL, paper.unique_id)\n c = django.test.Client()\n # Extract all at once\n d = json.loads(c.get(paper_url).content)\n self.assertEqual({\"error\"}, set(d.keys()))", "def extract_pages(pdf):\n parser = PDFParser(pdf)\n document = PDFDocument(parser)\n\n if not document.is_extractable:\n return\n\n resource_manager = PDFResourceManager()\n device = PDFPageAggregator(resource_manager)\n interpreter = PDFPageInterpreter(resource_manager, device)\n\n for page in PDFPage.create_pages(document):\n interpreter.process_page(page)\n yield device.get_result()", "def parse_docs_from_page(self, page_url: str, page_text: str) -> Iterable[Document]:\n # get the data\n data = requests.get(page_url)\n\n # load data into bs4\n soup = BeautifulSoup(data.text, 'html.parser')\n # links = []\n pdf_dis = []\n dates = []\n table = []\n version_hash_fields = []\n\n for tr in soup.find_all('tr'):\n date_col = soup.find_all('td', attrs={'class': 'fd-col2'})\n hyperlink_col = soup.find_all('td', attrs={'class': 'fd-col1'})\n values = [td.text for td in tr.find_all('td')]\n table.append(values)\n for link in hyperlink_col:\n pdf_url = 'https://www.health.mil/' + link.find('a')['href']\n pdf_di = DownloadableItem(doc_type='pdf',\n web_url=pdf_url)\n pdf_dis.append(pdf_di)\n for date in date_col:\n dates.append(date.text)\n\n doc_nums = []\n doc_titles = []\n doc_names = []\n for row in table[1:]:\n doc_data = row[0].split(':')\n\n if len(doc_data) == 1: # if no colon then no doc number\n if doc_data[0] == \"(DTM)-19-004 -Military Service by Transgender Persons and Persons with Gender Dysphoria (Change 1)\":\n doc_nums.append(\"19-004\")\n doc_names.append(\"DTM\")\n doc_titles.append(doc_data[0][14:])\n version_hash_fields.append({\"doc_name\": 'DTM', \"doc_title\": doc_data[0][14:]})\n else:\n doc_nums.append(\" \")\n doc_titles.append(doc_data[0])\n doc_names.append(doc_data[0])\n version_hash_fields.append({\"doc_name\": doc_data[0], \"doc_title\": doc_data[0]})\n else:\n\n tmptitle = doc_data[1][1:].replace(\"\\u201cClinical\",\"Clinical\").replace(\"System,\\u201d\",\"System\").replace(\"BUILDER\\u2122 \", \"Builder\").replace(\"\\u2013\",\"\")\n\n if \"Volume\" in tmptitle:\n doc_nums.append(doc_data[0][7:]+\" Volume \"+tmptitle.split()[-1])\n else:\n doc_nums.append(doc_data[0][7:])\n doc_titles.append(doc_data[1][1:].replace(\"\\u201cClinical\",\"Clinical\").replace(\"System,\\u201d\",\"System\").replace(\"BUILDER\\u2122 \", \"Builder\").replace(\"\\u2013\",\"\"))\n doc_names.append(doc_data[0][:6])\n\n version_hash_fields.append({\"doc_name\": doc_data[0][:7], \"doc_title\": doc_data[1]})\n\n 
parsed_docs = []\n page_url = 'https://www.health.mil/About-MHS/OASDHA/Defense-Health-Agency/Resources-and-Management/DHA-Publications'\n num_docs = len(doc_nums)\n for i in range(num_docs):\n # put all the relevant info into dictionaries\n doc = Document(doc_type=doc_names[i].replace(\" \",\"-\"),\n doc_title=doc_titles[i],\n doc_num=doc_nums[i],\n doc_name=doc_names[i].replace(\" \",\"-\")+\" \"+doc_nums[i],\n publication_date=dates[i],\n cac_login_required=False,\n crawler_used='dha_pubs',\n source_page_url=page_url,\n downloadable_items=[pdf_dis[i]],\n version_hash_raw_data=version_hash_fields[i])\n parsed_docs.append(doc)\n\n return parsed_docs", "def convert_pdf_to_txt(pdf):\n stdout = subprocess.Popen([\"pdftotext\", \"-q\", pdf, \"-\"],\n stdout=subprocess.PIPE).communicate()[0]\n return stdout", "def fetch_pdf(url, browser):\n\tpass\n\n\t# grab link page\n\n\t# search soup for pdf file\n\n\t# grab pdf file and return it", "def parse_table_to_tracy_file(latname: str, df: pd.DataFrame, filename: str) -> None:\n save_string(parse_table_to_tracy_string(latname, df), filename)", "def main():\n mip = parametros()\n mir = Reporte(CURRENT_PATH, mip.debug, mip.overwrite)\n pdfs = mir.obtener()\n if pdfs:\n print(\"Obteniendo nuevos pdf:\")\n for pdf in pdfs:\n print(f\"* {pdf}\")\n\n for file in glob.glob(f\"{CURRENT_PATH}/resources/pdf/*.pdf\"):\n data = mir.parser(file)\n mir.escribir(data)", "def main():\n argparser = argparse.ArgumentParser(description=\"Convert plot to table\")\n\n argparser.add_argument(\"pdf\", action=\"store\", help=\"pdf file\",\n default=None, nargs=\"*\")\n\n args = argparser.parse_args()\n\n if len(args.pdf) == 0:\n open_gui()\n else:\n process_pdf(args.pdf[0])\n\n generate_data()", "def _pdf_to_txt(file_path, dst_dir, file_name):\n if file_name is None:\n file_name = os.path.split(file_path)[1]\n file_dst = os.path.join(dst_dir, re.sub(r'\\.pdf$', '.txt', file_name))\n return subprocess.call([\"pdftotext\", \"-layout\", file_path, file_dst])", "def parse_pdf(url):\n pdf_data = urllib2.urlopen(Request(url)).read()\n # Cast to StringIO object\n from StringIO import StringIO\n memory_file = StringIO(pdf_data)\n\n # Create a PDF parser object associated with the StringIO object\n parser = PDFParser(memory_file)\n\n # Create a PDF document object that stores the document structure\n document = PDFDocument(parser)\n\n # Define parameters to the PDF device object\n rsrcmgr = PDFResourceManager()\n retstr = StringIO()\n laparams = LAParams()\n pageno = 1\n codec = 'utf-8'\n\n # Create a PDF device object\n device = TextConverter(rsrcmgr, retstr, codec=codec, pageno=pageno,\n laparams=laparams)\n\n # Create a PDF interpreter object\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n\n # Process each page contained in the document\n text = ''\n for page in PDFPage.create_pages(document):\n interpreter.process_page(page)\n text = retstr.getvalue()\n\n vol = get_vol(text)\n no = get_no(text)\n return vol, no", "def convert_pdf(pdf_path):\n with Image(filename=pdf_path, resolution=300, format=\"pdf\") as pdf:\n pdf.convert('tiff')\n pdf.save(filename='./data/raw/full.tiff')", "def pdf_link_to_text(pdf_attachment):\n print(\"Downloading \" + str(pdf_attachment))\n filename = download_pdf_file(pdf_attachment)\n print(\"Extracting text from \" + str(filename))\n pdf_text = convert_pdf_to_text(filename)\n os.remove(filename)\n return pdf_text", "def digital_text(file_path):\n doc = fitz.open(file_path)\n page_count = doc.pageCount\n print(\"\\n number of pages : 
\",page_count)\n total_text = \"\"\n try:\n for page_num in range(page_count):\n p = doc.loadPage(page_num)\n page_text = p.getText()\n total_text += page_text\n print(\"\\n number of pages extracted : \", (page_count))\n except Exception as e:\n print(\"\\n Error in digital_text : \", traceback.format_exc(()))\n return total_text", "def analyze(directory, pdf_file, doc_type):\n\n total_redaction_count = 0\n total_redacted_text_area = 0\n total_estimated_text_area = 0\n total_estimated_num_words_redacted = 0\n\n # Split the pdb (which is a pdf file) into individual jpgs.\n redaction_module.pdf_to_jpg(directory, pdf_file)\n\n os.chdir(directory)\n for jpg_file in os.listdir(directory):\n # Iterating through each page of the PDB\n if jpg_file.endswith(\".jpg\"):\n\n [redaction_count, redacted_text_area, estimated_text_area, estimated_num_words_redacted, potential, text_potential, type1, type2, type3] = redaction_module.image_processing(jpg_file, doc_type)\n\n total_redaction_count += redaction_count\n total_redacted_text_area += redacted_text_area\n total_estimated_text_area += estimated_text_area\n total_estimated_num_words_redacted += estimated_num_words_redacted\n\n # Crucial clean-up of jpg files (Note: If files are not removed, code will NOT work properly).\n os.remove(jpg_file)\n\n # Now that we've gone through each page, we need to calculate the stats for the document.\n if total_estimated_text_area != 0:\n total_percent_text_redacted = float(total_redacted_text_area / total_estimated_text_area)\n else:\n total_percent_text_redacted = 0\n\n data = []\n # open csv file and write the stats in a single row representing the document.\n with open('output.csv', mode='a+') as output:\n output_writer = csv.writer(output, delimiter=',')\n row = [pdf_file, total_redaction_count, total_percent_text_redacted, total_estimated_num_words_redacted]\n data.append(row)\n print(tabulate(data, headers=[\" \", \" \", \" \", \" \", \" \"]))\n output_writer.writerow(row)\n output.close()", "def textify(read_pdf,spage,epage):\n\n page_text = \"\"\n for page in range(spage, epage):\n page_content = read_pdf.getPage(page)\n page_text += page_content.extractText()\n\n full_text = page_text #.encode('utf-8')\n return full_text", "def processPdf(self, pdf_path: str) -> (list, list):\n hocr_list = []\n images = []\n numPages = self.getNumberPages(pdf_path)\n for initalpage in range(1, numPages+self.batch, self.batch):\n pages = pdf2image.convert_from_path(pdf_path,\n first_page=initalpage,\n last_page=min(\n initalpage+self.batch-1, numPages),\n output_folder=self.images_path,\n grayscale='true',\n fmt='tif')\n for page in pages:\n hocr_bytes = pytesseract.image_to_pdf_or_hocr(page, \n lang='por',\n extension='hocr',\n config='--psm 1')\n hocr_list.append(hocr_bytes)\n images.append(page.filename)\n page.close()\n return hocr_list, images", "def scrape (url, pdf_filename, pdf_page_size=PDF_PAGE_SIZE, folder=OUTPUT_FOLDER, clean_it=True):\n\n raw_html = get_url(url)\n if raw_html is None:\n print \"Sorry, could not read \", url\n else:\n filename_prefix, file_ext = os.path.splitext(pdf_filename)\n if clean_it:\n title = Document(raw_html).short_title()\n content = Document(raw_html).summary(html_partial=True)\n frame = HTML_FRAME.substitute(content=to_unicode(content),\n url=url,\n title=title)\n source = write_html_file(folder, os.extsep.join([filename_prefix, 'html']), frame)\n else:\n source = write_html_file(folder, os.extsep.join([filename_prefix, 'html']), raw_html)\n\n if source:\n generate_pdf (folder, 
filename_prefix, pdf_page_size)", "def explore_data(data,prj_info,TMP=1234):\r\n print(\" Data file rows and columns are : \", data.shape)\r\n #Open pdf\r\n pp = PdfPages(prj_info['OUTPUT_PATH'] + \"exploration_\" + str(TMP) + \".pdf\")\r\n\r\n #Plot average\r\n plot_average_reponse(data,prj_info,pp,TMP)\r\n\r\n #Close pdf\r\n pp.close()\r\n return None", "def convert_pdf_to_text(pdf_path):\n process_id = os.getpid()\n resource_manager = PDFResourceManager()\n output = StringIO.StringIO()\n laparams = LAParams(detect_vertical=True)\n device = TextConverter(\n resource_manager,\n output,\n codec='utf-8',\n laparams=laparams\n )\n interpreter = PDFPageInterpreter(resource_manager, device)\n file_handler = file(pdf_path, 'rb')\n pages = PDFPage.get_pages(file_handler)\n\n for idx, page in enumerate(pages):\n print(\"Page \" + str(idx + 1), end='\\r')\n sys.stdout.flush()\n interpreter.process_page(page)\n print()\n\n data = output.getvalue()\n data = data.replace('\\n', ' ')\n data = data.replace('\\t', ' ')\n data = data.replace('\\r', ' ')\n data = data.replace('\\x0c', ' ')\n\n return data", "def test_extract(self):\n for document in [test_pdfutil.BLANK, test_pdfutil.LOREM]:\n paper = factories.Paper.create(document=factory.django.FileField(\n data=document))\n paper_url = \"{}/{}\".format(EXTRACT_URL, paper.unique_id)\n\n c = django.test.Client()\n variables = [\"funding\", \"grant_id\"]\n for var in variables:\n var_url = \"{}/{}\".format(paper_url, var)\n self.assertEqual(b'{\"value\":null}', c.get(var_url).content)\n\n # Extract all at once\n self.assertEqual(b'{\"funding\":null,\"grant_id\":null}',\n c.get(paper_url).content)", "def createPDFDoc(self, filepath):\n print(\"Starting pdf creation\")\n strMD=\"\"\n for fileMD,data in self.graph.nodes(data=True):\n if not os.path.isfile(fileMD):\n sys.exit(\"Error: \" + fileMD + \" does not exist\")\n if not fileMD.endswith(\"md\" or \"markdown\"):\n sys.exit(fileMD + \" is not a markdown file\");\n print(\"Found file: \" + fileMD)\n strMD = strMD + \" \" + fileMD\n cmd = \"pandoc --latex-engine=xelatex -s -o \" + filepath + strMD\t\n print(\"Starting file conversion.\")\n if subprocess.call(cmd) != 0:\n print(\"Conversion failed\")\n else:\n print(\"Saving pdf file to: \" + filepath)\n print(\"Conversion successfull\")", "def process_pdf(pdf):\n\n if os.path.exists(legend_images_dir):\n subprocess.call([\"rm\", \"-rf\", legend_images_dir])\n os.makedirs(legend_images_dir)\n\n if os.path.exists(plot_images_dir):\n subprocess.call([\"rm\", \"-rf\", plot_images_dir])\n os.makedirs(plot_images_dir)\n\n if os.path.exists(csv_output_dir):\n subprocess.call([\"rm\", \"-rf\", csv_output_dir])\n os.makedirs(csv_output_dir)\n\n if os.path.exists(pdf_output_dir):\n subprocess.call([\"rm\", \"-rf\", pdf_output_dir])\n os.makedirs(pdf_output_dir)\n\n genImages(pdf)", "def savePDFFile(self):\n s = self.text.get(\"1.0\", tk.END)\n f = open(file, \"w\", encoding='utf-8')\n f.write(s)\n f.close()\n\n # Create a file for each student with their graded files\n pdf = FPDF()\n pdf.add_page()\n pdf.set_font(\"Arial\", size=12)\n pdf.multi_cell(0, 5, s)\n\n # Removed the \\t from the filepath in order to save as pdf in 'Graded' file\n savingFilePDF = re.sub('\\t', '', item_text[0] + \".pdf\")\n pdf.output(gradedFilesFolder + \"\\\\\" + savingFilePDF)\n highlightingTextInFile()", "def pdf_to_text(self, f):\n cmd = [\"pdftohtml\", \"-zoom\", \"1.35\", \"-xml\", \"-stdout\", f.name]\n code, stdout, stderr = self.shell(cmd)\n if code > 0:\n raise 
ValueError(stderr)\n return stdout.decode('utf-8')", "def main(input_filepath, latex):\n logger = logging.getLogger(__name__)\n\n df = pd.read_csv(input_filepath)\n out = df.head()\n if latex:\n out = out.to_latex()\n print(out)", "def extractEntities(file):\n\n table=readFile(file)\n if table is None:\n return\n htmlMatrix=np.array(table.htmlMatrix, object)\n headers=table.colHeaders\n if len(headers)==0:\n return\n\n dictColClasses = {str(i)+\"###\"+h:{} for i, h in enumerate(headers)}\n coli=0\n for col in range(htmlMatrix.shape[1]):\n if coli>=len(headers):\n print('Column out of headers: ', table.tableId, coli)\n continue\n colname=str(coli)+\"###\"+headers[coli]\n for row in range(table.startRows,htmlMatrix.shape[0]):\n listEntities=htmlMatrix[row][col]\n\n if len(listEntities)==0:\n continue\n #print(listEntities)\n for entity in listEntities:\n entity=entity.replace(\"wd::\",\"\")\n #print(\"e: \", entity)\n entityClasses=wikidataDAO.getClasses(entity)\n if entityClasses is None:\n continue\n for classe in entityClasses:\n\n actualClass=dictColClasses.get(colname).get(classe)\n if actualClass is None:\n dictColClasses[colname][classe]=1\n else:\n dictColClasses[colname][classe]+=1\n coli+=1\n table.setColumnClasses(dictColClasses)\n table.setTableType(table.tableType.value)\n ft = open(os.path.join(FOLDER_TABLES_OUT, str(table.tableId.replace(\".\", \"_\")) + \".json\"), \"w\")\n ft.write(json.dumps(table.reprJSON(), cls=ComplexEncoder, skipkeys=True))\n ft.close()", "def load_main_table(table_text):\n\n lines = table_text.split('\\n')\n i = 1\n cols = []\n for thing in lines[1].split('\",\"'):\n if thing in ['C ', 'I ', 'K ', 'E ', 'H ']:\n cols.append(thing.strip() + str(i) + ' ')\n if thing == 'H ':\n i += 1\n else:\n cols.append(thing)\n lines[1] = '\",\"'.join(cols)\n text = \"\\n\".join(lines[1:])\n df = pd.read_csv(StringIO(text))\n df.index = df['Student ID']\n\n return df", "def _load_table(table: Model, directory: Path, format_: str):\n\n if directory is not None:\n print(f\" Loading {table.table_name()}...\")\n in_file = Path(directory) / f\"{table.table_name()}.{format_}\"\n dataset = tablib.Dataset(headers=table.fields()).load(in_file.read_text())\n print(f\" Importing {table.table_name()} into the database...\")\n table.insert_many(dataset.dict).execute()\n print(\" Done.\")\n print(\"=====================\")\n else:\n pass\n # print(dataset.export(\"csv\"))", "def dump_metadata(inputpdf, tempdir):\n metadatafile = os.path.join(tempdir, \"metadata.txt\")\n cmd_dump_metadata = CMD_DUMP_METADATA.format(inputpdf=inputpdf, metadatafile=metadatafile)\n os.system(cmd_dump_metadata)\n return metadatafile", "def pdf():\n env.file_ext = \".pdf\"\n local(\"pandoc {input_files} -o {output_file}{file_ext} -H {preamble_file} --template {template_file} --bibliography={bib_file} --csl={csl_file} -V fontsize=12pt -V papersize=a4paper -V documentclass:report -N --latex-engine=xelatex\".format(**env))", "def release(self) -> None:\n # Write release files to output directory\n if not self.csv_path.parent.exists():\n self.csv_path.parent.mkdir()\n self.table.to_csv(self.csv_path, index=False)\n self.write_feather(self.table)\n write_frequencies(self.table, str(self.frequencies_path))\n Documentation().write(self.dataverse)\n\n # Zip the output files\n with ZipFile(self.csv_path.with_suffix('.zip'), 'w', ZIP_DEFLATED) as zip:\n zip.write(self.csv_path, self.csv_path.name)\n zip.write(self.frequencies_path, self.frequencies_path.name)\n [zip.write(doc_path, doc_path.name) for doc_path in 
self.csv_path.parent.glob('*.md')]\n\n # Validate docs\n self.check_documentation(self.table)", "def do(self, pdf_path: str) -> (str, list, list):\n hocr_list, images = self.processPdf(pdf_path)\n hocr_final = self.combineHocr(hocr_list, pdf_path.split('/')[-1])\n return hocr_final, hocr_list, images", "def isPdf(page):\n return page['data'][:4] == '%PDF'", "def __load_gt(df_table, df_idx, p_csv):\n idx_start = df_idx\n with open(p_csv, 'rb') as f_csv:\n for line in f_csv:\n # decoding french letters and mapping them to UTF-8\n str_line = line.decode('utf-8', 'replace').strip()\n # gt annotations may end with empty lines\n if str_line == '':\n continue\n # parsing line\n img_path, transcript = str_line.split(self.sep)\n img_path = img_path.encode('ascii', 'ignore').decode('ascii', 'ignore')\n # parsing french to ASCII\n transcript = transcript#.lower()\n transcript = self.to_ascii(transcript)\n # determine img path\n img_path = os.path.join(self.imgs_path, img_path)\n # filling table\n # if self.lower_case:\n # transcript = transcript.lower()\n if not os.path.isfile(img_path):\n pass\n # raise FileNotFoundError('No image exists at {}'.format(img_path))\n warnings.warn('No image exists at {}'.format(img_path))\n continue\n if not self.lazy_loading:\n self.form_imgs[img_path] = np.array(Image.open(img_path)).astype(np.uint8)\n # storing relevant data\n df_table['transcription'].append(transcript)\n df_table['form_path'].append(img_path)\n df_table['word_id'].append(df_idx)\n df_table['bbox'].append([])\n df_idx += 1\n return df_table, df_idx, list(range(idx_start, df_idx))", "def detect_table_from_pdf_page(\n self,\n page_tokens: List[lp.TextBlock],\n page_image: Union[\"Image\", np.ndarray],\n table_region_proposals: List[lp.TextBlock] = None,\n ) -> List[Table]:\n if not isinstance(page_image, np.ndarray):\n page_image = np.array(page_image)\n\n if table_region_proposals is None:\n table_region_proposals = self.detect_table_region_proposals([page_image])[0]\n\n tables = []\n for table in table_region_proposals:\n table_tokens = page_tokens.filter_by(table, center=True)\n table = union_blocks(table_tokens)\n table_image = np.array(table.crop_image(page_image))\n # Slightly rectify the table region based on the contained tokens\n\n columns = self.identify_table_columns(table, table_image)\n if columns is None: # This is not a valid table, drop it.\n continue\n\n rows = self.identify_table_rows(table, columns, table_tokens, table_image)\n if rows is None: # This is not a valid table, drop it.\n continue\n\n table = Table.from_columns_and_rows(table, table_tokens, columns, rows)\n tables.append(table)\n return tables", "def ocr_many_pages() -> Path:\n return Path(__file__).parent / 'data' / 'ocr_many_pages.pdf'", "def generate_coordinates(file,id):\n # Open a PDF file.\n fp = open(settings.STATIC_ROOT+\"/tests/\"+str(id)+\"/\"+file, 'rb')\n parser = PDFParser(fp)\n document = PDFDocument(parser)\n if not document.is_extractable:\n raise PDFTextExtractionNotAllowed\n rsrcmgr = PDFResourceManager()\n device = PDFDevice(rsrcmgr)\n laparams = LAParams()\n\n # Create a PDF page aggregator object.\n device = PDFPageAggregator(rsrcmgr, laparams=laparams)\n\n # Create a PDF interpreter object.\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n\n # loop over all pages in the document\n i=0\n content = {}\n\n for page in PDFPage.create_pages(document):\n i+=1\n content[i] = [[],[]]\n # read the page into a layout object\n interpreter.process_page(page)\n layout = device.get_result()\n # extract text 
from this object\n parse_obj(layout._objs,content[i])\n\n return content", "def generate_pdf(pdf_data):\n\n html = HTML(string=pdf_data)\n f = html.write_pdf()\n\n return f", "def sa_summary_pdf(sa_id):\n pass", "def _pdf(self):\n # LOG: processing_type property\n self.set_property('processing_type', 'pdf')\n xmlDoc = PDFiD(self.src_path)\n oPDFiD = cPDFiD(xmlDoc, True)\n # TODO: are there other characteristics which should be dangerous?\n if oPDFiD.encrypt.count > 0:\n self.make_dangerous('encrypted pdf')\n if oPDFiD.js.count > 0 or oPDFiD.javascript.count > 0:\n self.make_dangerous('pdf with javascript')\n if oPDFiD.aa.count > 0 or oPDFiD.openaction.count > 0:\n self.make_dangerous('openaction')\n if oPDFiD.richmedia.count > 0:\n self.make_dangerous('flash')\n if oPDFiD.launch.count > 0:\n self.make_dangerous('launch')", "def join_files():\n files = [ent_1.get(), ent_2.get()]\n out_writer = PyPDF2.PdfFileWriter()\n for file in files:\n pdf_file = open(file, 'rb')\n file_reader = PyPDF2.PdfFileReader(pdf_file)\n for page in range(file_reader.numPages):\n pageObj = file_reader.getPage(page)\n out_writer.addPage(pageObj)\n\n output_file_name = result_entry.get()\n output_file = open(output_file_name, 'wb')\n out_writer.write(output_file)\n output_file.close()\n pdf_file.close()\n opener = \"open\" if sys.platform == \"darwin\" else \"xdg-open\"\n subprocess.call([opener, output_file_name])\n clear_labels()", "def convertAnnotatedPDF(fname, refNrPath, origPDF):\n #tempdir is where I will save in between files\n try:\n os.mkdir(\"tempDir\")\n except:\n pass\n print(fname+\" is being exported.\")\n\n # get info on origin pdf\n input1 = PdfFileReader(open(origPDF, \"rb\"))\n npages = input1.getNumPages()\n pdfsize = input1.getPage(0).mediaBox\n pdfx = int(pdfsize[2])\n pdfy = int(pdfsize[3])\n # rM will not create a file when the page is empty so this is a\n # placeholde empty file to use.\n rm2svg(emptyRm, \"tempDir/emptyrm.svg\", coloured_annotations=True,\n x_width=pdfx, y_width=pdfy)\n\n # find what the page hashes are\n content = json.loads(open(refNrPath + \".content\").read())\n # convert all pages\n pdflist = []\n for pg, pg_hash in enumerate(content['pages']):\n # print(pg)\n rmpath = refNrPath + \"/\" + pg_hash + \".rm\"\n if os.path.isfile(rmpath):\n rm2svg(rmpath, \"tempDir/temprm\" + str(pg) + \".svg\", coloured_annotations=False, x_width=pdfx, y_width=pdfy)\n svg_path = \"tempDir/temprm\" + str(pg) + \".svg\"\n else:\n svg_path = \"tempDir/emptyrm.svg\"\n convertSvg2PdfCmd = \"\".join([\"rsvg-convert -f pdf -o \", \"tempDir/temppdf\" + str(pg), \".pdf \", svg_path])\n os.system(convertSvg2PdfCmd)\n pdflist.append(\"tempDir/temppdf\"+str(pg)+\".pdf\")\n # merge the annotated pages\n merged_rm = \"tempDir/merged_rm.pdf\"\n os.system(\"convert \"+ (\" \").join(pdflist)+\" \"+merged_rm)\n # stamp extracted annotations onto original with pdftk\n stampCmd = \"\".join([\"pdftk \", origPDF, \" multistamp \", merged_rm, \" output \", origPDF[:-4], \"_annot.pdf\"])\n os.system(stampCmd)\n # Remove temporary files\n shutil.rmtree(\"tempDir\", ignore_errors=False, onerror=None)\n return True", "def extract(self):\n\n table = self.soup.table\n table_rows = table.find_all('tr')\n\n for tr in table_rows:\n td = tr.find_all('td')\n data = [item.text for item in td]\n\n #Extracting each row and save it in a file as an object\n try:\n \n with open('data.txt', 'wb') as file:\n my_pickler = pickle.Pickler(file)\n my_pickler.dump(data)\n\n except Exception as exc:\n print('Error while saving data :', 
exc)\n\n #saving data in our dictionaries\n try: \n \n with open('data.txt', 'rb') as file:\n my_dipeck = pickle.Unpickler(file)\n my_data = my_dipeck.load()\n self.deaths_per_country[my_data[0]] = my_data[3]\n self.cases_per_country[my_data[0]] = my_data[1]\n\n except Exception as exc2:\n print('Error while loading data :', exc2)", "def download_postcode_areas() -> pd.DataFrame:\n\n uk_cities_postcodes = \"https://en.wikipedia.org/wiki/List_of_postcode_areas_in_the_United_Kingdom\"\n\n postcodes_tables = pd.read_html(uk_cities_postcodes)\n postcode_table = postcodes_tables[0]\n\n print(\"Saving the postcodes....\")\n output_path = path.join(\"../..\", \"datasets\", \"uk_postcodes\", f\"postcodes.csv\")\n postcode_table.to_csv(output_path)\n print(\"Saving the postcodes....DONE\")\n return postcode_table", "def _pdread2astrotable(csvgzdir):\n df = pd.read_csv(csvgzdir)\n tb = Table.from_pandas(df)\n return tb", "def clean_PDF(submission):\n src = submission.file_upload.file.name\n pdf1 = PdfFileReader(src)\n merger = PdfFileMerger(strict=False, )\n merger.append(pdf1, import_bookmarks=False)\n merger.addMetadata({'/Title': '',\n '/Author': '',\n '/Creator': '',\n '/Producer': ''})\n fd, temp_file = tempfile.mkstemp(suffix='.pdf')\n merger.write(temp_file)\n merger.close()\n os.close(fd)\n shutil.move(temp_file, src) # replace the original PDF on the server", "def fetch_texas_wind():\n data_file = POOCH.fetch(\"texas-wind.csv\")\n data = pd.read_csv(data_file)\n return data", "def create_pdf(file_path: Path) -> None:\n pdf = FPDF()\n pdf.add_page()\n pdf.set_font(\"Arial\", size=12)\n with open(file_path, \"r\") as file:\n for line in file:\n pdf.cell(200, 10, txt=line, ln=1)\n pdf_dir_path: Path = Path(calculate_path(file_path))\n Path(pdf_dir_path.parent).mkdir(parents=True, exist_ok=True)\n pdf_path: str = f\"{str(pdf_dir_path)}.pdf\"\n pdf.output(pdf_path)", "def ocr_tsv_to_ocrdf(tsv_result):\n \n if tsv_result:\n try: \n df_result = pd.read_csv(io.StringIO(tsv_result), sep='\\t', quotechar='', quoting=3) # no quoting\n except Exception as err:\n from utilities import logs\n logs.exception_report(f\"Exception encountered in converting tsv_result from pytesseract: {err}\\n\"\n f\"pytesseract result:\\n {tsv_result}\")\n import pdb; pdb.set_trace()\n return None \n return df_result\n return None", "def read_table_data(self, table):\n data = []\n index = 0\n for row in table.rows:\n data.append([])\n for cell in row.cells:\n text_data = ''\n for para in cell.paragraphs:\n text_data += para.text.strip(' ')\n data[index].append(text_data)\n index += 1\n\n # trim unneeded rows in old & new reports\n if all('CAPA' in x for x in data[0]):\n self.table_data = data[2:]\n else:\n self.table_data = data[1:]\n # trim end of list\n self.table_data = [row[:5] for row in self.table_data]", "def readPDF(infile, width, grayscale=True): \n\n #To open a pdf file.\n imgAllPages = convert_from_path(infile, dpi=100)\n img = imgAllPages[0] #pick first page up\n img = np.asarray(img)\n img = img.take([1,2,0], axis=2) #change color ch. (GBR -> RGB)\n \n #To scale image to designated width.\n if img.shape[1] != width:\n height = int(round(img.shape[0] / img.shape[1] * width))\n img = cv2.resize(img, (width, height), \n interpolation = cv2.INTER_CUBIC)\n\n #To convert image in grayscale. 
\n if grayscale:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n return img \n #}}}", "def _compute_single_pdf(self, **kwargs):\n raise NotImplementedError", "def preprocess_docs():\n\n print(\"Getting started!\")\n stopwords.populate_stopwords(NLP, STOPWORD_URL)\n\n print(str.format(\"Using data dir:{}\", DATA_DIR))\n\n csv_file = open(os.path.join(DATA_DIR, 'PDFs.csv'))\n reader = csv.reader(csv_file, 'excel')\n rows = list(reader)\n\n filenames = [_get_filename(row) for row in rows]\n\n pool = Pool(multiprocessing.cpu_count())\n\n try:\n pool.map(_get_item, rows)\n pool.map(pdf.extract_text, filenames)\n docs = pool.map(_extract_questions, rows)\n docs = [d for d in docs if d is not None]\n\n _find_similar(docs, simdoc=compare.compare_doc_keywords)\n\n for doc in docs:\n if doc is None:\n continue\n doc.save_json()\n\n except KeyboardInterrupt:\n pool.terminate()\n print(\"You cancelled the program!\")\n sys.exit(1)\n\n print(\"Done\")", "def read_from_text(self) -> str:\r\n with open(self.destination, 'r', encoding='utf8') as f:\r\n pdf = f.read()\r\n return pdf", "def tabdes(filename, body):\n # XXX checksums ignored\n head = Struct(\"!BiHBxxxB\")\n body = Struct(body)\n # foot = Struct(\"!4s\")\n\n data = []\n with open(filename, \"rb\") as f:\n buffer = f.read()\n _, _, count, length, _ = head.unpack_from(buffer, 0)\n offset = head.size\n for i in range(count):\n row = body.unpack_from(buffer, offset)\n data.append(row)\n offset += body.size\n else:\n print(\"read %d rows\" % len(data))\n # offset = 2 ** 16 - foot.size\n # _, foot.unpack_from(buffer, offset))\n return data" ]
[ "0.7241174", "0.6659181", "0.65428925", "0.6456565", "0.63844985", "0.6363576", "0.6349044", "0.6338092", "0.6214082", "0.6197532", "0.6154468", "0.6104792", "0.6099473", "0.60386115", "0.6013377", "0.6007071", "0.60045767", "0.59880614", "0.595533", "0.5941718", "0.5934649", "0.5918883", "0.59087735", "0.58845776", "0.5862737", "0.5837867", "0.58184105", "0.58136", "0.57838434", "0.57579374", "0.5721656", "0.5675486", "0.5663953", "0.5660576", "0.5627252", "0.5623385", "0.5602921", "0.5572633", "0.55702835", "0.55645835", "0.55623597", "0.5559974", "0.555065", "0.5547996", "0.55375016", "0.55290306", "0.5520695", "0.54991174", "0.5481076", "0.54500127", "0.544973", "0.54469633", "0.54334843", "0.5423825", "0.5411394", "0.540891", "0.5406844", "0.5389198", "0.5362767", "0.53576714", "0.53534764", "0.5344477", "0.53424853", "0.53356594", "0.5330137", "0.5314891", "0.5309612", "0.5292509", "0.5291853", "0.5290868", "0.5281867", "0.5271433", "0.52609277", "0.5260702", "0.52571976", "0.52389467", "0.52323073", "0.5230435", "0.52255195", "0.5222135", "0.5218316", "0.52171504", "0.5215868", "0.5214338", "0.5208911", "0.5198628", "0.5195585", "0.5181003", "0.51749265", "0.5160915", "0.5160634", "0.5156106", "0.5151789", "0.5147911", "0.5145332", "0.5142045", "0.51411575", "0.51322246", "0.5127998", "0.51243997" ]
0.5661401
33
Create instance of PyRPS. redis_url Redis instance address (tuple containing (hostname, port)). namespace Namespace to separate Pub/Sub instance from another running on the same redis host.
def __init__(self, namespace, redis_url=("localhost", 6379)): self.namespace = namespace if isinstance(redis_url, tuple): self.redis = StrictRedis(host=redis_url[0], port=redis_url[1]) elif isinstance(redis_url, str): self.redis = StrictRedis(host=redis_url)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect_to_redis():\n return Redis(host=redis_host, port=redis_port, db=0)", "def __init__(self):\n try:\n config = redis_settings[\"REDIS_BACKEND\"]\n self.servers = config[\"servers\"]\n self.port = config[\"port\"]\n self.db = config[\"db\"]\n self.password = config[\"password\"]\n # r = redis.Redis('10.66.136.84', '6379', 0,password=\"xsw2CDE#vfr4\")\n #r = redis.Redis('10.66.136.84', '6379', 0)\n self.redis = Redis(self.servers, self.port, self.db,\n password=self.password, socket_timeout=1)\n except Exception, e:\n print \"Redis YAMLConfig Error :\", e\n logging.error(e)", "def __init__(self, host, port):\n self.r = redis.StrictRedis(host=host, port=port)", "def conn_redis(host, port, db=0):\r\n r = redis.Redis(host=host, port=port, db=db)\r\n return r", "def create_connection():\n # REDIS_URL is defined in .env and loaded into the environment by Honcho\n redis_url = os.getenv('REDIS_URL')\n # If it's not defined, use the Redis default\n if not redis_url:\n redis_url = 'redis://localhost:6379'\n urlparse.uses_netloc.append('redis')\n url = urlparse.urlparse(redis_url)\n return redis.StrictRedis(\n host=url.hostname,\n port=url.port,\n db=0,\n password=url.password\n )", "def __init__(self, config):\n self.r = redis.StrictRedis(host=config['REDIS_HOST'],\n port=config['REDIS_PORT'],\n db=config['REDIS_DB'])", "def connect_redis(uri):\n puri = urlparse.urlparse(uri)\n host = puri.hostname\n port = puri.port\n password = puri.password if puri.password else ''\n db_name = puri.path.split('/')[1]\n r = redis.Redis(host=host, port=port, password=password, db=db_name)\n assert r.ping()\n return r", "def __init__(self, settings):\n\n self.r = redis.Redis(\n host=settings['hostname'],\n port=settings['port']\n )\n\n # set the redis list name for storing jobs\n self.joblist = settings['joblistname']", "def connect(self):\n self.connection = redis.Redis(\n host=self.host,\n port=self.port,\n socket_connect_timeout=self.timeout,\n socket_timeout=self.timeout\n )", "def _conn_redis(self) -> Redis:\n return Redis(host=self._REDIS_DB_HOST, port=self._REDIS_DB_PORT, db=0,decode_responses=True)", "def create_redis_connection(app=None):\n\n if app:\n app.logger.info('Instantiated new redis connection.')\n\n redis_connection = redis.StrictRedis(\n host=\"localhost\",\n port=6379,\n db=0\n )\n\n if not redis_connection.exists('last_queue_idx'):\n redis_connection.set('last_queue_idx', 0)\n\n return redis_connection", "def get_connection(self, params):\r\n return Redis(connection_pool=self.get_or_create_connection_pool(params))", "def _get_conn(self):\n return redis.Redis(connection_pool=self.pool)", "def get_redis_client():\n return redis.from_url(settings.REDIS_URI)", "def _connect(self):\n self.connection = RedisConnection(self.host, self.port, self.dbname)", "def __init__(self, redis_connection=None):\n self._redis_connection = redis_connection or get_websocket_redis_connection()", "def redis_conn_pool(self) -> ConnectionPool:\n if self._redis_conn_pool is None:\n if self._config[\"graph_redis_pool_block\"]:\n pool_class: Callable = BlockingConnectionPool\n else:\n pool_class = ConnectionPool\n\n if self._config[\"graph_redis_pool_gevent_queue\"]:\n redis_conn_pool = pool_class().from_url(\n self._config[\"graph_redis_url\"],\n decode_components=True,\n max_connections=self._config[\"graph_redis_pool_max_connections\"],\n timeout=self._config[\"graph_redis_pool_timeout\"],\n queue_class=gevent.queue.LifoQueue,\n )\n\n else:\n redis_conn_pool = pool_class().from_url(\n 
self._config[\"graph_redis_url\"],\n decode_components=True,\n max_connections=self._config[\"graph_redis_pool_max_connections\"],\n timeout=self._config[\"graph_redis_pool_timeout\"],\n )\n\n self._redis_conn_pool = redis_conn_pool\n\n self._logger.debug(\n \"[%s]: Initialized Redis connection pool: %s\",\n self.__name__,\n self._redis_conn_pool,\n )\n\n return self._redis_conn_pool", "def get_redis_server():\n return redis_server", "def get_redis(**kwargs):\n redis_cls = kwargs.pop('redis_cls', DEFAULT_REDIS_CLS)\n url = kwargs.pop('url', None)\n if url:\n return redis_cls.from_url(url, **kwargs)\n else:\n return redis_cls(**kwargs)", "def __init__(self, host, redis_port, ssh_user, use_ssh=True):\n\n if use_ssh:\n forwarder = create_tunnel(host=host, port=redis_port, ssh_user=ssh_user)\n self.connection = redis.StrictRedis(host=forwarder.bind_address, port=forwarder.bind_port, db=0)\n else:\n self.connection = redis.StrictRedis(host=host, port=redis_port, db=0)", "def _connect_to_redis(self):\n for name, config in settings.STREAM_REDIS_CONFIG.items():\n self._redis_client = tornadoredis.Client(host=config['host'],\n port=config['port'],\n password=config['password'],\n connection_pool=pool)\n self._redis_client.connect()", "def __init__(self):\n self._redis = redis.Redis(host=\"localhost\", port=6379)\n self._redis.flushdb()", "def _CreateRedisClient(self):\n try:\n redis_client = redis.from_url(self._REDIS_URL, socket_timeout=60)\n redis_client.ping()\n except redis.exceptions.ConnectionError:\n redis_client = fakeredis.FakeStrictRedis()\n\n return redis_client", "def _CreateRedisClient(self):\n try:\n redis_client = redis.from_url(self._REDIS_URL, socket_timeout=60)\n redis_client.ping()\n except redis.exceptions.ConnectionError:\n redis_client = fakeredis.FakeStrictRedis()\n\n return redis_client", "def get_redis() -> redis.Redis:\n global redis_conn\n if not redis_conn:\n host = app.config.get(\"REDIS_HOST\", \"127.0.0.1\")\n port = app.config.get(\"REDIS_PORT\", \"6379\")\n db = app.config.get(\"REDIS_DB\", \"0\")\n redis_conn = redis.Redis(host=host, port=port, db=db)\n\n return redis_conn", "def get_redis():\n return redis.StrictRedis(host='redis', port=6379)", "def __init__(self, host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD):\n self.db = redis.StrictRedis(host=host, port=port, password=password, decode_responses=True)", "def __init__(self):\n self._rcon = None\n self._host = CONFIG.redis.host\n self._port = CONFIG.redis.port\n self._db = CONFIG.redis.database\n self.refresh()", "def _connect(self):\n try: \n self.r = redis.StrictRedis(host=self.host, port=self.port, db=self.db)\n except:\n raise", "def __init__(self, handlers=None, default_host=\"\", transforms=None,\n wsgi=False, **settings):\n tornado.web.Application.__init__(self, handlers, default_host, transforms, wsgi, **settings)\n self._rc = redis.StrictRedis(**(settings.get('redis_config', {}))) # redis client: one per application\n self._rcps = self._rc.pubsub() # redis pubsub obj: one per application\n self._sub_cbs = {} # redis pubsub callbacks: one per subscription\n self._sub_cmd_q = 'q_sub_cmds_' + uuid4().hex # TODO: could make a shorter ID just based on tornado server ID\n self._rcps.subscribe(self._sub_cmd_q)\n listener = threading.Thread(target=self._rc_listen)\n listener.setDaemon(True)\n listener.start()", "def __call__(self, settings):\n self.clear() # make sure you can reconfigure the client\n db = settings.get('redis.db', 0)\n config = {'db': int(db)}\n if ('redis.unix_socket_path' in settings 
and\n settings['redis.unix_socket_path'] is not None):\n config['unix_socket_path'] = settings['redis.unix_socket_path']\n elif ('redis.url' in settings and\n settings['redis.url'] is not None): # should default to\n # `redis://localhost:6379`\n # Unpack.\n url = settings['redis.url']\n\n # Parse into a config dict.\n o = self.parse_url(url)\n config.update({\n 'host': o.hostname,\n 'port': o.port,\n })\n if o.password:\n config['password'] = o.password\n\n max_connections = settings.get('redis.max_connections', None)\n if max_connections is not None:\n config['max_connections'] = int(max_connections)\n config = {'connection_pool': self.pool_cls(**config)}\n else:\n raise pyramid.exceptions.ConfigurationError(\n \"\"\"To use redis with pyramid, redis.url or\n redis.unix_socket_path should be provided\"\"\"\n )\n self.update(config)\n return self", "def __init__(self, r, handlers):\n\t\tthreading.Thread.__init__(self)\n\t\tself.redis = r\n\t\tself.pubSub = self.redis.pubsub()\n\t\tself.handlers = handlers\n\t\tchannels = []\n\t\tfor k, v in self.handlers.items():\n\t\t\tchannels.append(k)\n\t\tself.pubSub.subscribe(channels)\n\t\tlog.info(\"Subscribed to redis pubsub channels: {}\".format(channels))", "def redis_client(self) -> Redis:\n if self._redis_client is None:\n redis_client = Redis(connection_pool=self.redis_conn_pool)\n\n self._redis_client = redis_client\n\n self._logger.debug(\n \"[%s]: Initialized Redis client: %s\", self.__name__, self._redis_client\n )\n\n return self._redis_client", "def __init__(self, uuid=None):\n\n atexit.register(self.__del__)\n self._stop = Event()\n #: Separate Thread for handling messages\n self._proc = None\n #: Redis connection\n self._redis = get_redis_connection()\n # pylint: disable=E1123\n self._pubsub = self._redis.pubsub(ignore_subscribe_messages=True)\n\n if uuid:\n self.uuid = uuid\n else:\n self.uuid = str(uuid4())\n\n if not REGISTRY.exists(self.uuid):\n REGISTRY.register(self.uuid)\n else:\n self.uuid = None\n raise UUIDInUseException('UUID is already taken')", "def __call__(self, settings, registry=None):\n\n # If called without a registry, i.e.: not within the context of a\n # Pyramid application, then register the connection pool in a\n # zope.component registry.\n if registry is None:\n registry = self.get_registry()\n\n # Query the registry for a client_configuration. 
If it doesn't exist,\n # instantiate and register one for next time.\n redis_client_conf = registry.queryUtility(IRedisClientConfiguration)\n if not redis_client_conf:\n redis_client_conf = self.config(settings) # update RedisClientConf\n self.provides(self.config, IRedisClientConfiguration)\n registry.registerUtility(self.config,\n IRedisClientConfiguration)\n\n # And use it to instantiate a redis client.\n return self.redis_cls(**redis_client_conf)", "def get_redis_client(self):\n\n client = Client(\n #connection_pool=connection_pool,\n host=self.backend_settings.get('HOST', 'localhost'),\n port=int(self.backend_settings.get('PORT', 6379)),\n io_loop=self.io_loop,\n password=self.backend_settings.get('PASSWORD', None),\n selected_db=int(self.backend_settings.get('DB', 0)),\n reconnect_callback=self.listen)\n\n return client", "def connection():\n global _connection\n if _connection is None:\n _connection = StrictRedis.from_url(REDIS_URL)\n return _connection", "def __init__(self, **kwargs):\n self._categories_key = kwargs.get('categories_key', 'categories')\n self._metric_slugs_key = kwargs.get('metric_slugs_key', 'metric-slugs')\n self._gauge_slugs_key = kwargs.get('gauge_slugs_key', 'gauge-slugs')\n\n self.connection_class = kwargs.pop('connection_class', app_settings.CONNECTION_CLASS)\n\n if self.connection_class:\n package, module = self.connection_class.rsplit('.', 1)\n self.r = getattr(import_module(package), module)()\n else:\n self.host = kwargs.pop('host', app_settings.HOST)\n self.port = kwargs.pop('port', app_settings.PORT)\n self.db = kwargs.pop('db', app_settings.DB)\n self.password = kwargs.pop('password', app_settings.PASSWORD)\n self.ssl = kwargs.pop('ssl', app_settings.SSL)\n self.socket_timeout = kwargs.pop(\n 'socket_timeout',\n app_settings.SOCKET_TIMEOUT\n )\n self.connection_pool = kwargs.pop(\n 'connection_pool',\n app_settings.SOCKET_CONNECTION_POOL\n )\n\n # Create the connection to Redis\n self.r = redis.StrictRedis(\n host=self.host,\n port=self.port,\n db=self.db,\n password=self.password,\n ssl=self.ssl,\n socket_timeout=self.socket_timeout,\n connection_pool=self.connection_pool,\n decode_responses=True\n )", "def connect_server(self):\n redis_host = \"localhost\"\n redis_port = 6379\n redis_password = \"\"\n # step 3: create the Redis Connection object\n try:\n\n # The decode_repsonses flag here directs the client to convert the responses from Redis into Python strings\n # using the default encoding utf-8. 
This is client specific.\n self.r = redis.StrictRedis(host=redis_host, port=redis_port,\n password=redis_password, decode_responses=True)\n\n # step 4: Set the hello message in Redis\n self.r.set(\"msg:hello\", \"Hello World!!!\")\n\n # step 5: Retrieve the hello message from Redis\n msg = self.r.get(\"msg:hello\")\n print(msg)\n\n except Exception as e:\n print(e)", "def __init__(self):\n self._redis = redis.Redis()\n self._redis.flushdb()", "def _CreatePubsubClient():\n client = pubsub_client.PubSubClient()\n client.CreateTopic(DEVICE_NOTE_PUBSUB_TOPIC)\n client.CreateTopic(HOST_NOTE_PUBSUB_TOPIC)\n return client", "def __init__(self, redis_conn, namespace_deliminator=':',\n bitop_ttl=DEFAULT_TTL, bucket_func=None):\n self._redis_conn = redis_conn\n self._namespace_deliminator = namespace_deliminator\n self._bitop_ttl = bitop_ttl\n if not bucket_func:\n bucket_func = granularity.daily\n self.bucket_func = bucket_func", "def make_redis_store(uri):\n result = urlparse(uri)\n scheme = result.scheme.lower()\n if not result.scheme.startswith('redis'):\n raise ValueError('not a redis uri')\n host = result.hostname\n port = result.port\n database = int(result.path[1:])\n if result.password:\n password = unquote(result.password)\n else:\n password = None\n if scheme == 'redis+legacy':\n class_ = redis.Redis\n else:\n class_ = redis.StrictRedis\n store = class_(\n host, port, database, password,\n socket_timeout=SOCKET_TIMEOUT,\n socket_connect_timeout=SOCKET_CONNECT_TIMEOUT\n )\n return store", "def __init__(self, redis_conn, key):\n self.redis_conn = redis_conn\n self.key = key", "def createNamespace(self):\r\n raise NotImplementedError('Endpoint can not be used directly.')", "def redis_from_url(url, db=None, charset='utf-8', errors='strict',\n decode_responses=False, socket_timeout=None, **kwargs):\n url = urlparse.urlparse(url)\n\n # We only support redis:// schemes.\n assert url.scheme == 'redis' or not url.scheme\n\n # Extract the database ID from the path component if hasn't been given.\n if db is None:\n try:\n db = int(url.path.replace('/', ''))\n except (AttributeError, ValueError):\n db = 0\n\n # TODO: unix domain sockets\n pool = redis.ConnectionPool(connection_class=Connection,\n host=url.hostname, port=int(url.port or 6379), db=db,\n password=url.password, decode_responses=decode_responses,\n encoding=charset, encoding_errors=errors,\n socket_timeout=socket_timeout)\n\n return redis.StrictRedis(connection_pool=pool, **kwargs)", "async def create_subscription(user: int, redis: RedisDB):\n subscription_data = {\n \"subscriber_id\": user.id,\n \"cost\": str(os.getenv(\"AMOUNT\")),\n \"currency\": \"NANO\",\n \"period\": int(os.getenv(\"PERIOD\"))\n }\n json_data = json.dumps(subscription_data)\n r = requests.post(f\"{os.getenv('API_ENDPOINT')}create_subscription?token={os.getenv('NR_TOKEN')}\", json_data)\n rx = r.json()\n await redis.set(user.id, rx['subscription_id'])\n return r.json()", "async def connect(self):\n self.client = await asyncio_redis.Connection.create(\n host=self.host,\n port=self.port,\n db=self.database,\n auto_reconnect=self.reconnect,\n password=self.password,\n )", "def __init__(self, port=1071):\n\n context = zmq.Context()\n\n self.socket = context.socket(zmq.REP)\n self.socket.bind('tcp://*:' + str(port))\n\n self.socket.recv()", "def __init__(self):\n fd = open(\"conf/redis_config.json\", \"r\")\n tmp = fd.read()\n data = json.loads(tmp)\n self.database = redis.StrictRedis(\n host=data[\"host\"], \n port=data[\"port\"], \n password=None,\n 
decode_responses=True\n )\n self.key = data[\"key\"]", "def get_connection(self, redis_prefix):\n return self.get_app().extensions['redis'][redis_prefix]", "def get_redis_client(host='localhost', port=6379, db=0):\n host = os.environ.get('REDIS_HOST') or host\n port = os.environ.get('REDIS_PORT') or port\n return StrictRedis(host=host, port=port, db=db)", "def connect_to_db(self):\n r = redis.Redis(host=self.hostname,\n port=self.portnumber,\n password=self.password)\n try:\n r.ping()\n except redis.ConnectionError:\n sys.exit('ConnectionError: is the redis-server running?')\n self.r = r", "def run_redis_example():\n\n try:\n r = redis.StrictRedis(host=host, port=port, password=pw,\n decode_responses=True)\n except Exception as e:\n print(f'Error connecting to Redis DB: {e}')\n\n return r", "def __init__(self, amqp_url):\n self._connection = None\n self._channel = None\n self._url = amqp_url", "def redis_port():\n docker_client = docker.Client(version='auto')\n download_image_if_missing(docker_client)\n container_id, redis_port = start_redis_container(docker_client)\n yield redis_port\n docker_client.remove_container(container_id, force=True)", "def init(conf, expire=0):\n\n\t# Pull in the module variable\n\tglobal _moRedis, _muiExpire\n\n\t# Create the Redis connection\n\t_moRedis = StrictRedis(**conf)\n\n\t# Store the expire time\n\t_muiExpire = expire", "def __init__(__self__, *,\n configuration: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n enable_non_ssl_port: Optional[pulumi.Input[bool]] = None,\n shard_count: Optional[pulumi.Input[int]] = None,\n sku: Optional[pulumi.Input['RedisCacheSpecPropertiesSkuArgs']] = None,\n static_ip: Optional[pulumi.Input[str]] = None,\n subnet_id: Optional[pulumi.Input[str]] = None):\n if configuration is not None:\n pulumi.set(__self__, \"configuration\", configuration)\n if enable_non_ssl_port is not None:\n pulumi.set(__self__, \"enable_non_ssl_port\", enable_non_ssl_port)\n if shard_count is not None:\n pulumi.set(__self__, \"shard_count\", shard_count)\n if sku is not None:\n pulumi.set(__self__, \"sku\", sku)\n if static_ip is not None:\n pulumi.set(__self__, \"static_ip\", static_ip)\n if subnet_id is not None:\n pulumi.set(__self__, \"subnet_id\", subnet_id)", "def _get_redis(self, config):\n expected_options = {'host', 'port', 'db_id'}\n _warn_on_extra(set(config.options('redis')) - expected_options -\n self.defaults, 'redis section option(s)')\n\n get = partial(config.get, 'redis')\n getint = partial(config.getint, 'redis')\n\n self.redis_host = get('HOST')\n self.redis_port = getint('PORT')\n self.redis_db_id = getint('DB_ID')", "def token_redis_connection():\n if not hasattr(current_app, 'auth0_redis_conn'):\n config = current_app.config.copy()\n config['REDIS_DB'] = config['AUTH0_REDIS_DB']\n # return everything as strings\n config['REDIS_DECODE_RESPONSES'] = True\n if config.get('USE_FAKE_REDIS', False):\n from fakeredis import FakeStrictRedis\n conn = FakeStrictRedis(decode_responses=True)\n else:\n conn = make_redis_connection(config)\n setattr(current_app, 'auth0_redis_conn', conn)\n return getattr(current_app, 'auth0_redis_conn')", "def set_redis_server(server):\n redis_server = server", "def dbConnect(self):\n r = redis.StrictRedis()\n try:\n r = redis.from_url(os.environ.get(\"REDIS_URL\"))\n print(\"DB Connection seems okay!\")\n except Exception as error:\n print (\"Oops! 
An exception has occured:\", error)\n print (\"Exception TYPE:\", type(error))\n r = None\n finally:\n return r", "def create_instance(c_instance):\n return RpycHost(c_instance)", "def connect_redis(conn):\n # Don't pass empty password to the client\n if not conn.get('password', None):\n conn.pop('password', None)\n\n return redis.StrictRedis(**conn)", "def new(configuration: Mapping[str, Any], loop: AbstractEventLoop) \\\n -> ProxyProtocol:\n return SocksProxy(loop)", "def init_redis_client(\n experiment_secrets: Secrets) -> RedisManagementClient:\n return __azure_client_factory(\"RedisManagementClient\", Secrets)", "def __init__(self, ip='127.0.0.1', port='50020'):\n self.ip = ip \n self.port = port\n self.ctx = zmq.Context()\n self.socket = zmq.Socket(self.ctx, zmq.REQ) # this is pub socket", "def __init__(self, job_key, task_key, host=REDIS.HOST,\n port=REDIS.PORT, dbname=REDIS.DBNAME):\n self.host, self.port, self.dbname = host, port, dbname\n super(RedisConnector, self).__init__()\n self.job_key = job_key\n self.task_key = task_key\n self.db_lock = threading.RLock()", "def redis_client(self) -> Redis:\n return self.app.key_value_store.redis_client", "def __init__(self, redis, logger, console_logger=None):\n super().__init__(redis, logger, console_logger)", "def __init__(self, redis, logger, console_logger=None):\n super().__init__(redis, logger, console_logger)", "def __init__(self, redis, logger, console_logger=None):\n super().__init__(redis, logger, console_logger)", "def subscription(self, uuid):\r\n return subs.Subscription(self, uuid)", "def _create_sub(name, rostype, topic_callback, *args, **kwargs):\n # counting subscriber instance per topic name\n if name in TopicBack.sub_instance_count.keys():\n TopicBack.sub_instance_count[name] += 1\n else:\n TopicBack.sub_instance_count[name] = 1\n\n return rospy.Subscriber(name, rostype, topic_callback, *args, **kwargs)", "def pubsub(self, **kwargs):\n if not self._pubsub:\n self._pubsub = Pubsub(self, **kwargs)\n return self._pubsub", "def configure_client(self):\n self.client = self.get_redis_client()\n return self.client", "def setup(self):\n self.context = zmq.Context()\n self.sub_socket = self.context.socket(zmq.SUB)\n if self.filter:\n self.sub_socket.setsockopt(zmq.SUBSCRIBE, self.filter)\n self.sub_socket.connect('tcp://'+self.host+':'+str(self.com_port))\n return self", "def __init__(self, app: NDNApp, client_prefix: FormalName, repo_prefix: FormalName):\n self.app = app\n self.client_prefix = client_prefix\n self.repo_prefix = repo_prefix\n self.pb = PubSub(self.app, self.client_prefix)", "def __init__(self, context, redis_client):\n self._context = context\n self._redis_client = redis_client", "def reconnect(self):\n try:\n self.redis = Redis(self.servers, self.port, self.db)\n except Exception, e:\n print e", "def getRedisHandler(self):\n (redis_ip, redis_port) = self.parserBns()\n return self.getHandlerByIp(redis_ip, redis_port)", "def conn(self):\n if self._sentinel:\n return self._sentinel.master_for(self._sentinel_name)\n if not self._conn:\n self._conn = self.__redis_mod.StrictRedis(\n host=self._host, port=self._port, **self._conn_kwargs\n )\n return self._conn", "def __init__(self, worker_id=0, base_port=5005):", "def __init__(self, *args, **kwargs):\n Redis.__init__(self, *args, **kwargs)\n \n # Set the module commands' callbacks\n MODULE_CALLBACKS = {\n self.CREATE_CMD : bool_ok,\n self.ADD_CMD : int_or_none,\n self.INCRBY_CMD : bool_ok,\n self.DECRBY_CMD : bool_ok,\n self.CREATERULE_CMD : bool_ok,\n 
self.DELETERULE_CMD : bool_ok,\n self.RANGE_CMD : parse_range,\n self.MRANGE_CMD : parse_m_range,\n self.GET_CMD : lambda x: (int(x[0]), float(x[1])),\n self.INFO_CMD : parse_info,\n }\n for k, v in six.iteritems(MODULE_CALLBACKS):\n self.set_response_callback(k, v)", "def connect(self, **kwargs):\n\n self.__db = redis.Redis(**kwargs)\n try:\n self.__db.info()\n self.connected = True\n except redis.ConnectionError as e:\n self.logger.error(\"Failed to connect to Redis server: \", e)\n raise QueueNotConnectedError(e)\n\n return True", "def __init__(\n self,\n state: str,\n redis_object: Redis,\n namespace: str | None = None,\n fallback_circuit_state: str = STATE_CLOSED,\n cluster_mode: bool = False,\n ):\n\n # Module does not exist, so this feature is not available\n if not HAS_REDIS_SUPPORT:\n raise ImportError(\n \"CircuitRedisStorage can only be used if the required dependencies exist\"\n )\n\n super().__init__(\"redis\")\n\n self._redis = redis_object\n self._namespace_name = namespace\n self._fallback_circuit_state = fallback_circuit_state\n self._initial_state = str(state)\n self._cluster_mode = cluster_mode\n\n self._initialize_redis_state(self._initial_state)", "def __init__(self, config, opsdroid=None):\n super().__init__(config, opsdroid=opsdroid)\n self.config = config\n self.client = None\n self.host = self.config.get(\"host\", \"localhost\")\n self.port = self.config.get(\"port\", 6379)\n self.database = self.config.get(\"database\", 0)\n self.password = self.config.get(\"password\", None)\n self.reconnect = self.config.get(\"reconnect\", False)", "def setup(self):\n\t\ttry:\n\t\t\tdatabase = redis.StrictRedis(host=self.HOST, port=self.PORT, db=self.DB)\n\n\t\t\tself.logger.info(\"Successfully established Redis connection.\")\n\n\t\t\treturn database\n\n\t\texcept redis.exceptions.ConnectionError as err:\n\t\t\traise err", "def subscribe_to_ticks_publisher(topic):\n ConfigFile = \"../config/kuber.conf\"\n config = configparser.ConfigParser()\n config.read(ConfigFile)\n\n zmq_conf = config['ZMQ CONFIGURATION']\n publish_port = zmq_conf['publish_port']\n\n print(\"Subscribing to topic %s at %s\" % (topic, publish_port))\n sub = TopicSubscriber()\n\n try: \n sub.init(topic, publish_port)\n except Exception as e:\n print(\"\"\"\n Subscriber init failed: {}\n \"\"\".format(e))\n sys.exit(0)\n\n # Return the subscriber context.\n return sub", "def initialize(redis_connection=None, prefix=None):\n assert isinstance(redis_connection, StrictRedis),\\\n \"redis_connection must be instance of StrictRedis\"\n # TODO: Consider allowing _db to be a function so that it\n # can reference a pool\n RedisEntity._db = redis_connection\n RedisEntity._prefix = prefix", "def __init__(self, hostname: str, port: int):\n # Create a dictionary of topics and callbacks\n self.callback_dict = dict()\n\n self.client = mqtt.Client(userdata=self.callback_dict)\n self.client.on_message = _on_message_handler\n self.client.connect(hostname, port, 60)", "def register_publisher(self, hostname, expire=-1):", "def _connect(self):\n try:\n rcon = redis.StrictRedis(self._host, self._port, self._db)\n # Return the connection only if is valid and reachable\n if not rcon.ping():\n return None\n except (redis.ConnectionError, redis.RedisError) as exc:\n LOG.error(\"Failed to connect to Redis Server: %s\", exc)\n return None\n\n return rcon", "def add_pool(name, **kwargs):\n _CONNECTIONS[name] = redis.StrictRedis(**kwargs)", "def from_settings(cls, settings):\n server = 
connection.get_redis(settings.getdict(\"REDIS_CONFIG\"))\n # XXX: This creates one-time key. needed to support to use this\n # class as standalone dupefilter with scrapy's default scheduler\n # if scrapy passes spider on open() method this wouldn't be needed\n # TODO: Use SCRAPY_JOB env as default and fallback to timestamp.\n key = DEFAULT_DUPEFILTER_KEY % {'timestamp': int(time.time())}\n return cls(server, key=key)", "def _create_pub(name, rostype, *args, **kwargs):\n # counting publisher instance per topic name\n if name in TopicBack.pub_instance_count.keys():\n TopicBack.pub_instance_count[name] += 1\n else:\n TopicBack.pub_instance_count[name] = 1\n\n return rospy.Publisher(name, rostype, *args, **kwargs)", "def __init__(__self__, *,\n properties: pulumi.Input['RedisCacheFirewallRuleSpecPropertiesArgs'],\n redis_cache: pulumi.Input[str],\n resource_group: pulumi.Input[str]):\n pulumi.set(__self__, \"properties\", properties)\n pulumi.set(__self__, \"redis_cache\", redis_cache)\n pulumi.set(__self__, \"resource_group\", resource_group)", "def getHandlerByIp(self, redis_ip, redis_port):\n pool = redis.ConnectionPool(host=redis_ip, port=redis_port, db=0)\n return redis.StrictRedis(connection_pool = pool)", "def redis(self, redis):\n\n self._redis = redis", "def getInstance():\n return net()" ]
[ "0.65858567", "0.6510327", "0.6420053", "0.6199663", "0.6159909", "0.61511815", "0.6020774", "0.5985737", "0.593212", "0.5925699", "0.5880181", "0.5879944", "0.5878096", "0.5852507", "0.5827931", "0.5803593", "0.5802949", "0.5757128", "0.56726545", "0.56652087", "0.5645896", "0.56363237", "0.5621862", "0.5621862", "0.5597433", "0.55327207", "0.5463311", "0.5443462", "0.53979987", "0.5382986", "0.5367961", "0.5330473", "0.5319351", "0.5296478", "0.5279673", "0.5268525", "0.52471524", "0.52360356", "0.5226857", "0.5226259", "0.5212484", "0.5212345", "0.5190087", "0.5143219", "0.51300454", "0.51217985", "0.51207566", "0.51206905", "0.5119644", "0.51107144", "0.50958323", "0.50800186", "0.5070987", "0.5032237", "0.5026948", "0.5025513", "0.4990248", "0.49872357", "0.49726516", "0.4961382", "0.4947408", "0.49371275", "0.49253497", "0.49218637", "0.49191964", "0.49146557", "0.49145415", "0.49102616", "0.48921847", "0.48853716", "0.48853716", "0.48853716", "0.48753843", "0.48520058", "0.48508495", "0.4846989", "0.48406413", "0.48292178", "0.48128396", "0.4808105", "0.4795043", "0.47945896", "0.47857094", "0.476133", "0.47475323", "0.47450334", "0.47410113", "0.47341043", "0.4732001", "0.4730968", "0.47263834", "0.47211584", "0.47085539", "0.4696256", "0.46923828", "0.46910372", "0.46882778", "0.4681417", "0.46740767", "0.46693495" ]
0.7303635
0
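Editorial aside (not part of the dataset): the negative snippets above all construct a Redis client from host/port/db settings in slightly different ways. A minimal redis-py sketch of that recurring pattern is given here for orientation only; the REDIS_HOST/REDIS_PORT/REDIS_DB environment variable names are assumptions, not taken from any row.

# Illustrative sketch only; assumes redis-py is installed and the env var names are hypothetical.
import os
import redis

def get_redis_client() -> redis.StrictRedis:
    # Build a client from environment settings, mirroring the snippets above.
    client = redis.StrictRedis(
        host=os.environ.get("REDIS_HOST", "localhost"),
        port=int(os.environ.get("REDIS_PORT", "6379")),
        db=int(os.environ.get("REDIS_DB", "0")),
        decode_responses=True,
    )
    client.ping()  # raises redis.exceptions.ConnectionError if the server is unreachable
    return client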
Subscribe to message queue. Yields messages as they appear in the queue. queue Queue name consumer_id Consumer name
def subscribe(self, queue, consumer_id): # Add myself to the list of consumers, if not already present. self.redis.sadd(self._ns_subscriptions(queue), consumer_id) return Subscription(self, queue, consumer_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_message(self):\n while self.queue.consuming:\n yield self.queue.channel._consume_message()", "def _consume(self):\n # HACK: run_in_executor is used as a workaround to use boto\n # inside a coroutine. This is a stopgap solution that should be\n # replaced once boto has support for asyncio or aiobotocore has\n # a stable release.\n loop = asyncio.get_event_loop()\n receive_message = partial(\n self.client.receive_message,\n QueueUrl=self.app.settings['SQS_INBOUND_QUEUE_URL'],\n AttributeNames=self.app.settings['SQS_ATTRIBUTE_NAMES'],\n MessageAttributeNames=self.app.settings['SQS_MESSAGE_ATTRIBUTES'],\n MaxNumberOfMessages=self.app.settings['SQS_MESSAGE_BATCH_SIZE'],\n VisibilityTimeout=self.app.settings['SQS_VISIBILITY_TIMEOUT'],\n WaitTimeSeconds=self.app.settings['SQS_WAIT_TIME'],\n )\n while True:\n future = loop.run_in_executor(None, receive_message)\n messages = yield from future\n for message in messages.get('Messages', []):\n message['Body'] = json.loads(message['Body'])\n yield from self._message_queue.put(message)", "def subscribe(self, queue, action):\n self.channel.queue_declare(queue=queue)\n self.channel.basic_consume(queue=queue,\n on_message_callback=action,\n auto_ack=True)\n self.channel.start_consuming()", "def start_consuming(self):\n\n for queue in self._handlers.keys():\n self._consumer_tags += self._channel.basic_consume(self.on_message,\n queue=queue)", "def consume():\n with conn.channel() as chan:\n \n def on_msg_recv(msg):\n \"\"\" Called when message arrives from RabbitMQ\n \"\"\"\n print \"processor|%s::Received message: %s\" % (UID, msg.body)\n chan.basic_ack(msg.delivery_tag)\n log(msg.body)\n process_msg(msg)\n \n \n # Declare and bind queue. RabbitMQ does nothing if queue already exists.\n chan.exchange_declare(exchange = EXCHANGE,\n type = EXCHANGE_TYPE)\n queue = chan.queue_declare(QUEUE)\n chan.queue_bind(exchange = EXCHANGE, \n queue = QUEUE, \n routing_key = ROUTING_KEY)\n \n # Declare that we are going to listen to given queue\n chan.basic_consume(queue = QUEUE, \n callback = on_msg_recv)\n \n # Main loop. Waiting for messages from RabbitMQ.\n while True:\n chan.wait()", "def subscribe(self, callback):\n self.channel.basic_consume(callback, queue=self.queue_name)\n self.channel.start_consuming()", "def subscribe(self):\n with self._rabbit_connection.connection.channel() as channel:\n self._queue = rabbitpy.Queue(\n channel=channel,\n name=self._subscriber_name + \"_queue\",\n durable=True,\n message_ttl=5 * 24 * 60 * 60 * 1000 # 5 days\n )\n self._queue.declare()\n self._queue.bind(self._exchange, self._routing_key)\n\n self._consume()", "def start_consuming(self, channel, rx_queue_name):\n if self.should_stop():\n logger.info(\"ready to stop, pause to consume\")\n return\n logger.info('Issuing consumer related RPC commands')\n self._consumer_tag = channel.basic_consume(\n self.on_message, rx_queue_name, auto_ack = False)\n channel.start_consuming()", "def subscribe(self):\n pubsub = self.redis_client.pubsub()\n pubsub.subscribe(self.message_channel)\n for item in pubsub.listen():\n if item.get(\"data\") not in (1, None):\n yield item", "def data_generator():\n msg = Message(Message.ADD, queue.uuid, queue)\n PROVIDER_MQ.put(msg)\n keep_running = True\n while keep_running:\n try:\n chunk = queue.get()\n yield chunk\n except Empty:\n app.logger.info('Queue empty. 
Ending stream')\n keep_running = False", "def subscribeConsumer(consumer):", "def sqs_messages(queue: str) -> Generator[Dict[str, Any], None, None]:\n\n while True:\n response = get_client(\"sqs\").receive_message(QueueUrl=queue)\n if \"Messages\" not in response:\n break\n msg = json.loads(response[\"Messages\"][0][\"Body\"])\n records = json.loads(msg[\"Message\"])\n retd = {}\n retd[\"key\"] = records[\"Records\"][0][\"s3\"][\"object\"][\"key\"]\n retd[\"bucket\"] = records[\"Records\"][0][\"s3\"][\"bucket\"][\"name\"]\n retd[\"ReceiptHandle\"] = response[\"Messages\"][0][\"ReceiptHandle\"]\n yield retd", "def start_consuming(self):\n self.logger.debug(\"Issuing consumer related RPC commands\")\n\n self._channel.basic_qos(prefetch_count=self._max_concurrent)\n self._channel.add_on_cancel_callback(self.on_consumer_cancelled)\n\n consume_kwargs = {\"queue\": self._queue_name}\n if PIKA_ONE:\n consume_kwargs[\"on_message_callback\"] = self.on_message\n else:\n consume_kwargs[\"consumer_callback\"] = self.on_message\n\n self._consumer_tag = self._channel.basic_consume(**consume_kwargs)", "def start_consuming(self):\n logger.info('Issuing consumer related RPC commands')\n self.add_on_cancel_callback()\n logger.info(\"[{}] Waiting for messages on exchange {}\".format(self.bot_id, self.exchange))\n self._consumer_tag = self._channel.basic_consume(self.on_message,\n self.queue_name)", "def _create_incoming_queue(self):\n\n connection = self.connection\n session = self.connection.session\n uuid = str(qpid.datatypes.uuid4())\n\n incoming = session.incoming(uuid)\n session.message_subscribe(\n queue=self.name,\n destination=uuid,\n )\n\n try:\n yield incoming\n finally:\n try:\n incoming.stop()\n except connection.backend.connection_errors:\n pass\n with session.lock:\n try:\n del session._incoming[uuid]\n except KeyError:\n pass", "async def consume_items_from_rabbitmq(queue):\n ctr = 0\n start = time.time()\n while True:\n await asyncio.sleep(0.001)\n for method_frame, properties, body in channel.consume(queue_name, inactivity_timeout=1):\n if method_frame:\n # print(body)\n while queue.full():\n await asyncio.sleep(0.001)\n # await queue.put(body)\n queue.put_nowait(body)\n # Acknowledge the message\n channel.basic_ack(method_frame.delivery_tag)\n ctr += 1\n if not ctr % 1000:\n end = time.time() - start\n print(f'elapsed time: {end:.3f}\\tmessages received: {ctr}')\n else:\n # empty remaining items from queue\n while queue.qsize():\n await asyncio.sleep(0.001)\n end = time.time() - start\n print(f'elapsed time: {end:.3f}\\tmessages received: {ctr}')\n break\n await asyncio.sleep(0.001)\n\n requeued_messages = channel.cancel()", "def retrieve(self) -> Iterator[SQSMessage]:\n while True:\n try:\n sqs = SQSClientFactory(boto3).from_env()\n\n res = sqs.receive_message(\n QueueUrl=self.queue_url,\n WaitTimeSeconds=3,\n MaxNumberOfMessages=10,\n )\n\n messages = res.get(\"Messages\", [])\n if not messages:\n LOGGER.info(\"queue was empty\")\n\n s3_events = [SQSMessage(msg) for msg in messages]\n for sqs_message in s3_events:\n yield sqs_message\n\n sqs.delete_message(\n QueueUrl=self.queue_url,\n ReceiptHandle=sqs_message.receipt_handle,\n )\n\n except Exception as e:\n LOGGER.error(traceback.format_exc())\n time.sleep(2)", "def get_messages_from_queue(fx):\n\n for msg in queue.receive_messages():\n fx(msg)", "def consume_messages(process_func: Callable[[str], None]):\n consumer = get_consumer()\n\n for message in consumer:\n log.debug(f'Received a message: {message}')\n try:\n 
process_func(message.value)\n except Exception as e:\n log.error(f'Failed to process a message: {message.value}')\n log.exception(e)", "def __iter__(self):\n # Trigger the consumer procs to start off.\n # We will iterate till there are no more messages available\n self.size.value = 0\n self.pause.set()\n\n while True:\n self.start.set()\n try:\n # We will block for a small while so that the consumers get\n # a chance to run and put some messages in the queue\n # TODO: This is a hack and will make the consumer block for\n # at least one second. Need to find a better way of doing this\n meta, message = self.queue.get(block=True, timeout=1)\n except Empty:\n break\n\n # Count, check and commit messages if necessary\n self.offsets[meta.partition] = message.offset + 1\n self.start.clear()\n self.count_since_commit += 1\n self._auto_commit()\n yield message\n\n self.start.clear()", "def consumeMsg():\n\tosuser = 'osdev'\n\tospass = 'osdev'\n\toshost = '10.32.29.94'\n\tosport = '5672'\n\tosvhost = '/openstack'\n\tneutronExchange = Exchange('quantum', type='topic', durable=False)\n\tinfoQueue = Queue('exthook', exchange=neutronExchange , durable=False,\n\t\t\trouting_key='notifications.info')\n\twith Connection(\"\".join(['amqp://', osuser, ':', ospass, '@', \n\t\toshost, ':',osport, '/', osvhost])) as conn:\n\t\twith conn.Consumer(infoQueue, callbacks=[msgParse]):\n\t\t\twhile True:\n\t\t\t\ttry: \n\t\t\t\t\tconn.drain_events()\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tlogging.exception('Draining events from AMQP stop')\n\t\t\t\t\tbreak", "def __iter__(self):\n # Trigger the consumer procs to start off.\n # We will iterate till there are no more messages available\n self.size.value = 0\n self.events.pause.set()\n\n while True:\n self.events.start.set()\n try:\n # We will block for a small while so that the consumers get\n # a chance to run and put some messages in the queue\n # TODO: This is a hack and will make the consumer block for\n # at least one second. 
Need to find a better way of doing this\n partition, message = self.queue.get(block=True, timeout=1)\n except queue.Empty:\n break\n\n # Count, check and commit messages if necessary\n self.offsets[partition] = message.offset + 1\n self.events.start.clear()\n self.count_since_commit += 1\n self._auto_commit()\n yield message\n\n self.events.start.clear()", "def message_listener(self, topic, timeout):\n \"\"\"\n demo_message = [\n {'user_id': 'Lazy Man', 'timestamp': '2019-10-06T22:59:59.989Z', 'risk_level': 3}\n ]\n\n for message in demo_message:\n yield ERROR_CODE_ZERO, \"\", message\n \"\"\"\n\n while True:\n for error_code, error_message, message in self._consumer.subscribe(topic, timeout):\n yield error_code, error_message, message\n if error_code == 1:\n break", "async def consumer(message):\n # TODO\n print(message)", "def _listen_to_queues(cls):\n queues = cls.get_service_queues()\n for queue in queues:\n queue.consume(cls.process_messages)", "def test_get_messages1(self):\n self.queue.direct_declare(TEST_QUEUE)\n self.queue.publish(TEST_QUEUE, 'this is a test msg')\n\n messages = self.queue.get_messages(TEST_QUEUE, prefetch_count=1)\n assert len(messages) >= 1", "def _ns_queue(self, queue, consumer_id):\n return self._ns(queue, consumer_id, \"messages\")", "def read(self):\n if not self._consuming:\n yield from self._begin_consuming()\n return (yield from self._message_queue.get())", "def subscribe2API():\n\tconnection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))\n\tchannel = connection.channel()\n\n\tchannel.queue_declare(queue='ToAPIgatewayQueue')\n\n\tdef callback(ch, method, properties, body):\n\t\tif(body != ''):\t\n\t\t\tconnection.close()\n \t\tpublish2apiHandler(body)\n\t\t\t\n\t\t\t\n\t\t\t\n\tchannel.basic_consume(callback, queue='ToAPIgatewayQueue', no_ack=True)\n\n\tprint(' [*] Waiting for messages. 
To exit press CTRL+C')\n\tchannel.start_consuming()\n\t\n\treturn", "def _begin_consuming(self):\n self._consuming = True\n loop = asyncio.get_event_loop()\n self._message_queue = asyncio.Queue(\n maxsize=self.app.settings['SQS_PREFETCH_LIMIT'],\n loop=loop,\n )\n loop.create_task(self._consume())", "def subscribe_sqs_queue(self, topic, queue):\r\n t = queue.id.split('/')\r\n q_arn = 'arn:aws:sqs:%s:%s:%s' % (queue.connection.region.name,\r\n t[1], t[2])\r\n resp = self.subscribe(topic, 'sqs', q_arn)\r\n policy = queue.get_attributes('Policy')\r\n if 'Version' not in policy:\r\n policy['Version'] = '2008-10-17'\r\n if 'Statement' not in policy:\r\n policy['Statement'] = []\r\n statement = {'Action' : 'SQS:SendMessage',\r\n 'Effect' : 'Allow',\r\n 'Principal' : {'AWS' : '*'},\r\n 'Resource' : q_arn,\r\n 'Sid' : str(uuid.uuid4()),\r\n 'Condition' : {'StringLike' : {'aws:SourceArn' : topic}}}\r\n policy['Statement'].append(statement)\r\n queue.set_attribute('Policy', json.dumps(policy))\r\n return resp", "async def produce_consume(topic_name):\n await asyncio.create_task(produce(topic_name))", "def publish(self, queue, message, ttl=3600):\n\n # Get next message ID\n message_id = self.redis.incr(self._ns_nextid())\n\n # Push message to queue\n self.redis.setex(self._ns_message(queue, message_id), ttl, message)\n \n # List all consumers of given queue\n consumers = self.redis.smembers(self._ns_subscriptions(queue))\n\n # Publish the message to all the consumers.\n for consumer in consumers:\n self.redis.rpush(self._ns_queue(queue, consumer), message_id)", "def consume_message(message):\n # Assign the message to the global drone_message\n global drone_message\n drone_message = message\n # The Rabbit mq runs in the localhost and the username , password is\n # athavan\n credentials = pika.PlainCredentials('guest', 'guest')\n # Pass the mqhost , port , virtualhost and credentials\n parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)\n connection = pika.SelectConnection(parameters, on_connected)\n try:\n connection.ioloop.start()\n except KeyboardInterrupt:\n # close the connnection\n connection.close()\n # loop until we are fully closed. 
It will stop on its own\n connection.ioloop.start()", "def _dispatch_messages(self):\n while True:\n select_obj = (yield)\n if select_obj == self._message_queue.selobj:\n msg = self._message_queue.get_nowait()\n if msg is not None:\n msg_type = msg.get('type', None)\n if msg_type is not None:\n msg_handler = self._message_handlers.get(msg_type, None)\n if msg_handler is not None:\n msg_handler(msg['data'])", "def run(self) -> None:\n while self.data_incoming or len(self._queue):\n if not self._queue:\n logging.info(\"Consumer %d is sleeping since queue is empty\", self._name)\n time.sleep(0.75)\n print(self._queue.get())\n time.sleep(0.5)", "def _listen_queue(self, queue, callback):\n # Listen buy/sell orders from external system\n self._logger.info(f\"Declaring rabbit queue {queue}\")\n self._consumer_rabbit_channel.queue_declare(queue=queue, durable=True, auto_delete=True)\n self._logger.info(f\"Declaring callback to rabbit queue: {queue}, callback: {callback}\")\n self._consumer_rabbit_channel.basic_consume(queue, callback,\n consumer_tag=queue)", "def test_consumer_read_messages(self):\n try:\n test_consumer = TestConsumer(self.msg_queue, self.queue_lock, self.topic, self.properties_file)\n test_consumer.start()\n except Exception as e:\n self.fail(f\"test_consumer_read_messages() failed with exception: {e}\")\n\n producer_msg_queue = queue.Queue()\n producer_queue_lock = threading.Lock()\n try:\n test_producer = Producer(producer_msg_queue, producer_queue_lock, self.topic, self.producer_properties_file)\n test_producer.start()\n except Exception as e:\n self.fail(f\"test_consumer_read_messages() failed with exception: {e}\")\n\n msgs = []\n\n for i in range(1, 4):\n msg = f\"Message number {i}\"\n\n producer_queue_lock.acquire()\n producer_msg_queue.put_nowait(msg)\n producer_queue_lock.release()\n\n msgs.append(msg)\n\n # Sleep for few seconds seconds to allow the consumer thread to process all the messages.\n time.sleep(20)\n\n self.assertEqual(test_consumer.dequeue_msgs(), msgs)\n\n test_producer.stop()\n test_consumer.stop()\n test_producer.join()\n test_consumer.join()", "async def decoder(\n queue: asyncio.Queue[ConsumerPayload],\n consumer: AIOKafkaConsumer,\n) -> int:\n log.info(\"decoder: starting\")\n try:\n async for msg in consumer:\n if msg.value == EOT:\n log.info(\"decoder: EOT received\")\n break\n log.info(\n \"consumed: %s %s %s %s %s %s\",\n msg.topic,\n msg.partition,\n msg.offset,\n msg.key,\n msg.value,\n msg.timestamp,\n )\n bpayload: bytes = msg.value\n payload: ConsumerPayload = json.loads(bpayload)\n await queue.put(payload)\n except Exception:\n log.exception(\"decoder: exception\")\n log.info(\"decoder: exiting\")\n return 0", "def _consumer(self) -> None:\n while (data := self._q.get()) is not None:\n write_data(data, self.writer)\n self._q.task_done()\n else:\n logging.info(\"None received. Queue consumed.\")\n self._q.task_done()\n return", "def _ns_subscriptions(self, queue):\n return self._ns(queue, \"consumers\")", "def subscribe_to_commands(self):\n self.basic_consume(self.process_command, queue=self.name)", "def subscribe(self, queue, action=None):\n if action:\n self.broker.subscribe(queue, action)\n else:\n self.broker.subscribe(queue)", "def consume(self):\n LOGGER.debug('Consumer Initialized')\n # self.connect()\n channel = self.get_channel()\n self._bind_things(channel)\n\n try:\n LOGGER.info('Start consuming')\n channel.start_consuming()\n except ConnectionClosed:\n LOGGER.exception('Pika connection closed detected. 
Will attempt to start consuming again')\n self.consume()\n except KeyboardInterrupt as e:\n LOGGER.info('Keyboard interrupt, stop consuming')\n self.shutdown()\n raise e\n except Exception as e:\n LOGGER.exception(\"'%s\" % str(e))\n self.shutdown()\n if self.settings.CONSUMER['RAISE_EXCEPTION']:\n LOGGER.info(\"CONSUMER RAISED EXCEPTION\")\n raise e", "def read_queue():\n with open(\"config.json\") as f:\n config = json.load(f)\n \n cpars = pika.ConnectionParameters(host=config[\"cage_daq\"])\n connection = pika.BlockingConnection(cpars)\n channel = connection.channel()\n\n channel.exchange_declare(exchange=config[\"exchange\"], exchange_type='topic')\n \n channel.queue_declare(queue=config[\"queue\"], exclusive=True)\n \n channel.queue_bind(queue=config[\"queue\"], exchange=config[\"exchange\"], \n routing_key='sensor_value.#')\n \n channel.basic_consume(queue=config[\"queue\"], on_message_callback=callback, \n auto_ack=True)\n\n print(' [*] Waiting for messages. To exit press CTRL+C')\n channel.start_consuming()", "def consume_message(\n engine, cert_folder=CERT_FOLDER, service_uri=SERVICE_URI, topic_name=TOPIC_NAME\n):\n\n consumer = get_consumer(\n cert_folder=cert_folder, service_uri=service_uri, topic_name=topic_name\n )\n\n if consumer is None:\n sys.exit(1)\n\n tries = 0\n\n for message in consumer:\n data = None\n\n try:\n # Read message and get the data (dict).\n data = read_message(message=message)\n\n except Exception as error: # pylint: disable=broad-except\n tries += 1\n logger.error(\"Consumer could not read message due to %s.\", error)\n\n # Exit if number of tries exceeds the number of max read tries.\n if tries > MAX_READ_TRIES:\n sys.exit(1)\n\n finally:\n if data is not None:\n # Send the data to PostgreSQL server.\n insert_data(engine=engine, data=data)", "def receive_messages(project, subscription_name):\n subscriber = pubsub_v1.SubscriberClient()\n subscription_path = subscriber.subscription_path(\n project, subscription_name)\n\n def callback(message):\n loaded_data = json.loads(message.data.decode('utf-8'))\n\n insert_to_table(loaded_data)\n message.ack()\n\n subscriber.subscribe(subscription_path, callback=callback)\n\n # The subscriber is non-blocking, so we must keep the main thread from\n # exiting to allow it to process messages in the background.\n print('Listening for messages on {}'.format(subscription_path))\n while True:\n time.sleep(60)", "def consume(queue):\n result = queue.get() # consumer removes data from the queue\n # if there is no data on the queue, get() blocks, and\n # the consumer process waits\n \n print(result) # prints \"('image.jpg', 'scaled_image.jpg')\"", "def on_queue_declared(frame):\n channel.basic_consume(handle_delivery, queue='test')", "def listen(self):\n result = self.channel.queue_declare(queue=self.config['queue'], \n exclusive=True)\n if self.endpoints is not None:\n for key in self.endpoints:\n self.channel.queue_bind(exchange=self.config['exchange'], \n queue=self.config['queue'],\n routing_key=f\"sensor_value.{key}\")\n else:\n self.channel.queue_bind(exchange=self.config['exchange'],\n queue=self.config['queue'],\n routing_key=\"sensor_value.#\")\n \n self.channel.basic_consume(queue=self.config['queue'], \n on_message_callback=self.decode_values, \n auto_ack=True)\n\n # starts a while-type loop\n print(\"wabbit eatin hay\")\n self.channel.start_consuming()", "def consumer(queue, event, txt_file):\n while not event.is_set() or not queue.empty():\n message = queue.get()\n txt_file.write(message+'\\n')\n logging.info(\n 
\"Consumer storing message: %s (size=%d)\", message, queue.qsize()\n )\n\n logging.info(\"Consumer received event. Exiting\")", "def run(self):\n self.channel.queue_declare(self._request_queue)\n self.channel.basic_consume(self._request_queue, self.on_message)\n try:\n msg = \"Waiting for message ...\"\n print(msg)\n logging.info(msg)\n self.channel.start_consuming()\n except KeyboardInterrupt:\n self.channel.stop_consuming()\n\n self.connection.close()", "def listen_for_messages(self, callback):\n # generate get requests for all input queues\n requests = [port.in_queue.get() for port in self.ports]\n while requests:\n # helper variable for the asserts\n queues_with_pending_requests = [req.resource for req in requests]\n # There is a request for each input queue.\n assert set(self.input_queues) == set(queues_with_pending_requests)\n # For each input queue there's exactly one request.\n assert (\n len(queues_with_pending_requests) ==\n len(set(queues_with_pending_requests)))\n\n log.debug(\"{} waiting for next reception\".format(self))\n completed_requests = (yield self.env.any_of(requests))\n received_messages = list(completed_requests.values())\n log.debug(\"{} received {}\".format(\n self, received_messages))\n\n callback(received_messages)\n\n # Only leave the requests which have not been completed yet\n remaining_requests = [\n req for req in requests if req not in completed_requests]\n # Input queues that have been emptied since the last wake up.\n emptied_queues = [req.resource for req in completed_requests]\n # Add new get requests for the input queues that have been emptied.\n new_requests = []\n for input_queue in emptied_queues:\n new_requests.append(input_queue.get())\n requests = remaining_requests + new_requests", "async def read(self) -> None:\n make_non_blocking(self.stream)\n\n while not self.stream.closed:\n message = None\n try:\n message = await self.read_one()\n\n if not message:\n await self.sleep()\n continue\n else:\n self.wake()\n\n IOLoop.current().add_callback(self.queue.put_nowait, message)\n except Exception as e: # pragma: no cover\n self.log.exception(\n \"%s couldn't enqueue message: %s (%s)\", self, message, e\n )\n await self.sleep()", "def linkRabbit(self):\n\n print(\"Listening for RabbitMQ messages\")\n\n # RabbitMQ setup\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(host='localhost'))\n channel = connection.channel()\n\n #channel.exchange_declare(exchange='freqSweep', exchange_type='fanout')\n channel.exchange_declare(exchange='pwrSweep', exchange_type='fanout')\n\n result = channel.queue_declare(queue='', exclusive=True)\n queue_name = result.method.queue\n\n # channel.queue_bind(exchange='freqSweep', queue=queue_name)\n channel.queue_bind(exchange='pwrSweep', queue=queue_name)\n channel.basic_consume(queue=queue_name, on_message_callback=self.rabbitCallback, auto_ack=True)\n channel.start_consuming()", "def queue_consumer(self, q):\n\n self.status = 'Running...'\n\n while True:\n try:\n msg = q.get_nowait()\n if msg is None:\n break\n self.update_plot(msg)\n except Queue.Empty:\n time.sleep(0.1)\n\n self.status = 'Done'", "def process(self, message=None):\n\n while self.running:\n message = self.channel.basic.get(self.queue)\n if message:\n content = message.body\n\n # log message\n if self.debug:\n self.log(\"Recieved: \" + str(content))\n\n # send to child nodes\n self.scatter(Message(**self.parse(content)))\n else:\n # yield to other greenlet\n # self.tick()\n self.sleep(1)", "def receive():\n now = time.time()\n end = now + 
MAX_DURATION\n tmp = None\n # Heroku doesn't notify when clients disconnect so we have to impose a\n # maximum connection duration.\n while now < end:\n if not tmp:\n tmp = AsyncResult()\n BROADCAST_QUEUE.put(tmp)\n try:\n yield tmp.get(timeout=KEEP_ALIVE_DELAY)\n tmp = None\n except Timeout:\n yield ''\n now = time.time()", "def enable_subscription():\n client = KConsumer(config=subscriber_config)\n counter = 0\n while 1:\n data = client.consume()\n if data:\n print(\"Received Data\", counter)\n class_label = inference_on_data(data.value)\n publish_response(class_label)", "def __iter__(self):\n while True:\n m = self.recv(timeout=1.0)\n if m is not None:\n yield m\n logger.debug(\"done iterating over bus messages\")", "def receive_message(self, message: MessageEnvelope, queue: IMessageQueue):\n self.__callback(message, queue)", "def _messages_list(self, queue):\n\n return queue.messages()", "def next(self): # wait for 5 minutes after sending message\n if self.queue:\n messages = self.queue.get_messages(1,visibility_timeout=self.visibility_timeout)\n if messages:\n for m in messages:\n return m\n raise StopIteration", "def consume(self):\n\n self.consumer = self.getConsumer(self.client.topics[self.topic])\n\n # create splunk hec instance\n splunk_hec = hec(self.splunk_server,\n self.splunk_hec_port,\n self.splunk_hec_channel,\n self.splunk_hec_token,\n self.splunk_sourcetype,\n self.splunk_source,\n self.use_https,\n self.verify_ssl,\n self.use_compression,\n self.compresslevel)\n while(True):\n m = self.consumer.consume()\n \n # Append messages to list until we've hit self.batch_size\n if(len(self.messages) <= self.batch_size):\n self.messages.append(m.value)\n\n # Send messages to Splunk HEC\n if(len(self.messages) == self.batch_size):\n retry(self.sendToSplunk,\n attempts=self.retry_attempts,\n sleeptime=self.sleeptime,\n max_sleeptime=self.max_sleeptime,\n sleepscale=self.sleepscale,\n jitter=self.jitter,\n retry_exceptions=(Exception,),\n args=(splunk_hec,))", "def queue_iter(queue: Queue) -> Generator[T, None, None]:\n while True:\n val = queue.get()\n yield val", "def process_outgoing_queue(self, message_queue: \"Queue[Packet]\") -> None:\n while not message_queue.empty() > 0:\n curr_obs = message_queue.get()\n try:\n channel = self._get_channel_for_agent(curr_obs.subject_id)\n except Exception:\n channel = self.channels[curr_obs.subject_id]\n channel.enqueue_send(curr_obs)", "def on_message(self, channel_id, message):\n logger.access('-- SlimPatternSubscriberManager subscribe, channel_id: %s, message: %s', channel_id, message)\n\n clients = None\n for key in self.clients.iterkeys():\n # redis 仅支持 glob-style 的正则\n if fnmatch.fnmatchcase(channel_id, key):\n clients = self.clients.get(key, None)\n break\n\n if clients is None:\n return\n\n bad_clients = []\n for client in clients:\n if client.is_alive():\n client.on_sub_notification(channel_id, message)\n else:\n bad_clients.append(client)\n\n for client in bad_clients:\n clients.remove(client)\n\n if not clients:\n del self.clients[channel_id]\n self.subscriber.punsubscribe(channel_id)", "def process_queue(self):\n while not self.msg_queue.empty():\n addr, msg = self.msg_queue.get()\n if msg:\n print(msg)\n self.broadcast(addr, msg)\n else:\n self.clean(addr)", "def consume_messages(self):\n\n method_frame, properties, body = self.channel.basic_get(self.queue_name, no_ack=False)\n\n while method_frame:\n\n LOGGER.info(\"Message received\")\n\n self.channel.basic_ack(method_frame.delivery_tag)\n payload = json.loads(body)\n if not 
isinstance(payload, dict):\n return\n\n # Process the message\n if 'control' in payload:\n LOGGER.info(\"A control signal received!\")\n # self.set_control(payload['control'])\n print(payload['control'])\n\n # Continue getting messages\n method_frame, properties, body = self.channel.basic_get(self.queue_name, no_ack=False)\n\n # TODO\n # return control_signal", "def start_consuming(self):\n # LOGGER.info('Issuing consumer related RPC commands')\n if self._init_ok_ctrl and self._init_ok_task:\n self._channel_ctrl.add_on_cancel_callback(self.on_consumer_ctrl_cancelled)\n self._channel_task.add_on_cancel_callback(self.on_consumer_task_cancelled)\n self._consumer_tag_task = self._channel_task.basic_consume(\n self.queue_task,\n auto_ack=False,\n on_message_callback=self.on_message\n )\n self._consumer_tag_ctrl = self._channel_ctrl.basic_consume(\n self._topic_queue_name,\n auto_ack=False,\n on_message_callback=self.on_topic\n )\n self.was_consuming = True\n self._consuming = True", "def receive_message(\n self, queue_name, max_message_count: int | None = 1, max_wait_time: float | None = None\n ):\n if queue_name is None:\n raise TypeError(\"Queue name cannot be None.\")\n\n with self.get_conn() as service_bus_client, service_bus_client.get_queue_receiver(\n queue_name=queue_name\n ) as receiver, receiver:\n received_msgs = receiver.receive_messages(\n max_message_count=max_message_count, max_wait_time=max_wait_time\n )\n for msg in received_msgs:\n self.log.info(msg)\n receiver.complete_message(msg)", "def queue(self, name):\n # First create a queue\n queue = self.inbound_channel.declare_queue(name)\n\n # Create the registry for the queue\n registry = Registry(self, queue)\n\n # Prepare consuming queue with registry\n self.inbound_channel.consume(queue=queue, callback=registry)\n\n # Then, return the Registry object.\n return registry", "def kafka_payment_consumer_worker(mq: queue.Queue):\n global app_config\n\n # Client\n consumer = KafkaConsumer('payment',\n bootstrap_servers=bootstrap_servers,\n value_deserializer=lambda item: json.loads(item.decode('utf-8')))\n\n\n\n while not t_stop_event.is_set():\n try:\n # Message loop\n for message in consumer:\n logging.info(\"READING MESSAGE %s:%d:%d: key=%s value=%s\" % (\n message.topic,\n message.partition,\n message.offset,\n message.key,\n message.value)\n )\n\n # simple sanitizer\n if (\"action\" not in message.value) \\\n or (\"message\" not in message.value) \\\n or (\"request\" not in message.value[\"message\"]):\n logging.info(\"MALFORMED MESSAGE value=%s SKIPPING\" % (message.value,))\n continue\n\n # Action switch\n if str(message.value[\"action\"]).upper() == \"NOTIFY_DELIVERY_RESPONSE\":\n logging.info(\"MESSAGE <NOTIFY_DELIVERY_RESPONSE> RECEIVE\") # Mocked\n \"\"\"logging.info(\"PUT credit_deliverer MESSAGE in QUEUE\")\n mq.put(\n credit_deliverer()\n )\"\"\"\n except Exception as e:\n logging.fatal(e, exc_info=True)\n # Post routine\n\n consumer.close()\n return", "def join(self, queue_name):\n while True:\n size = 0\n for name in (queue_name, dq_name(queue_name)):\n size += self.client.hlen(self._add_namespace(name + \".msgs\"))\n\n if size == 0:\n return\n\n time.sleep(1)", "async def receive(pub_endpoint: str, topic: str, indicator_queue: asyncio.Queue):\n global logger\n socket = zmq.Context().socket(zmq.SUB)\n socket.connect(f\"tcp://{pub_endpoint}\")\n socket.setsockopt(zmq.SUBSCRIBE, topic.encode())\n poller = zmq.Poller()\n poller.register(socket, zmq.POLLIN)\n logger.info(f\"Receiving via ZMQ on topic 
{pub_endpoint}/{topic}\")\n while True:\n socks = dict(\n poller.poll(timeout=100)\n ) # note that smaller timeouts may increase CPU load\n if socket in socks and socks[socket] == zmq.POLLIN:\n try:\n topic, msg = socket.recv().decode().split(\" \", 1)\n except Exception as e:\n logger.error(f\"Error decoding message: {e}\")\n continue\n # the topic is suffixed with the message type\n if not topic.endswith(\"indicator\"):\n # pyvast-threatbus is not (yet) interested in Sightings or SnapshotRequests\n logger.debug(f\"Skipping unsupported message: {msg}\")\n continue\n await indicator_queue.put(msg)\n else:\n await asyncio.sleep(0.05) # free event loop for other tasks", "def test_process_message_queue(self):\n t = threading.Thread(target=self.handle_message_queue)\n t.start()\n\n self.dut._process_message_queue()\n\n t.join()", "def _mp_consume(client, group, topic, chunk, queue, start, exit, pause, size):\n\n # Make the child processes open separate socket connections\n client.reinit()\n\n # We will start consumers without auto-commit. Auto-commit will be\n # done by the master controller process.\n consumer = SimpleConsumer(client, group, topic,\n partitions=chunk,\n auto_commit=False,\n auto_commit_every_n=None,\n auto_commit_every_t=None)\n\n # Ensure that the consumer provides the partition information\n consumer.provide_partition_info()\n\n while True:\n # Wait till the controller indicates us to start consumption\n start.wait()\n\n # If we are asked to quit, do so\n if exit.is_set():\n break\n\n # Consume messages and add them to the queue. If the controller\n # indicates a specific number of messages, follow that advice\n count = 0\n\n message = consumer.get_message()\n if message:\n queue.put(message)\n count += 1\n\n # We have reached the required size. The controller might have\n # more than what he needs. 
Wait for a while.\n # Without this logic, it is possible that we run into a big\n # loop consuming all available messages before the controller\n # can reset the 'start' event\n if count == size.value:\n pause.wait()\n\n else:\n # In case we did not receive any message, give up the CPU for\n # a while before we try again\n time.sleep(NO_MESSAGES_WAIT_TIME_SECONDS)\n\n consumer.stop()", "def get_number_message_on_queue(client, queue):\n messages = list()\n channel = client.channel()\n channel.queue_declare(queue=queue, durable=True, auto_delete=False)\n\n try:\n for method_frame, properties, body in channel.consume(\n queue=queue,\n exclusive=True,\n inactivity_timeout=30):\n messages.append({'method_frame': method_frame, 'properties': properties, 'body': body})\n # Acknowledge the message\n channel.basic_ack(method_frame.delivery_tag)\n except TypeError:\n print(\"Fin de la lecture des messages.\")\n\n # Cancel the consumer and return any pending messages\n requeued_messages = channel.cancel()\n logger.info('Requeued %i messages' % requeued_messages)\n\n # Close the channel and the connection\n channel.close()\n return len(messages)", "def receive(self):\n self.stats_reset()\n try:\n self.messages = self.queue.receive_messages(AttributeNames=[\n 'SentTimestamp'\n ],\n MaxNumberOfMessages=self.queue_max,\n MessageAttributeNames=[\n 'All'\n ],\n VisibilityTimeout=0,\n WaitTimeSeconds=0)\n\n\n self.attributes_update()\n self.stats_update('msgs_received', len(self.messages))\n self.stats_update('msgs_total', self.attributes_get('ApproximateNumberOfMessages'))\n self.stats_update('msgs_delayed', self.attributes_get('ApproximateNumberOfMessagesDelayed'))\n self.stats_update('msgs_not_visible', self.attributes_get('ApproximateNumberOfMessagesNotVisible'))\n self.stats_show(prefix=' SQS - Starting Queue: ')\n return True\n except:\n raise", "def stress_test_consumer():\n consumer = kafka_manager.get_kafka_consumer()\n for message in consumer:\n message_content = json.loads(message.value.decode())\n message_topic = message.topic\n print(\"received:\")\n print(message_topic)\n print(message_content)", "def start_publishing(self):\n print(f\"{self._connection_param}: Issuing consumer related RPC commands\")\n # self._channel.confirm_delivery(self.on_delivery_confirmation)\n self.schedule_next_message(self.SLOW_SEND)", "def read(self):\n log.info(\"==>\")\n # TODO exceptions\n assert self.subscription_list is not None\n if not self.is_once():\n assert self.read_queue is not None\n event = None\n first_sample = True\n while True:\n log.debug(\"Processing event type %s\", event)\n # SAMPLE is handled in the same way as \"first_sample\"\n if first_sample or event == self.SubscriptionEvent.SAMPLE:\n response = self.sample(\n start_monitoring=self.is_monitor_changes() and first_sample)\n yield response\n if first_sample:\n yield self.sync_response()\n first_sample = False\n if self.is_once():\n break\n elif event == self.SubscriptionEvent.FINISH:\n log.debug(\"finishing subscription read\")\n break\n elif event == self.SubscriptionEvent.SEND_CHANGES:\n response = self.changes()\n log.debug(\"Sending changes\")\n yield from response\n elif event is None:\n log.warning(\"**** event is None ! ****\")\n # TODO error\n break\n else:\n log.warning(\"**** event=%s not processed ! 
****\", event)\n # TODO error\n break\n log.debug(\"Waiting for event\")\n event = self.read_queue.get()\n log.debug(\"Woke up event=%s\", event)\n if self.is_monitor_changes():\n self.stop_monitoring()\n\n log.info(\"<==\")", "def kafka_ordering_consumer_worker(mq: queue.Queue):\n global app_config\n\n # Client\n consumer = KafkaConsumer('ordering',\n bootstrap_servers=bootstrap_servers,\n value_deserializer=lambda item: json.loads(item.decode('utf-8')))\n\n while not t_stop_event.is_set():\n try:\n # Message loop\n for message in consumer:\n logging.info(\"READING MESSAGE %s:%d:%d: key=%s value=%s\" % (\n message.topic,\n message.partition,\n message.offset,\n message.key,\n message.value)\n )\n\n # simple sanitizer\n if (\"action\" not in message.value) \\\n or (\"message\" not in message.value) \\\n or (\"request\" not in message.value[\"message\"]):\n logging.info(\"MALFORMED MESSAGE value=%s SKIPPING\" % (message.value,))\n continue\n\n # Action switch\n if str(message.value[\"action\"]).upper() == \"PAYMENT_PLACED\":\n logging.info(\"PUT check_validity MESSAGE in QUEUE\")\n mq.put(\n check_validity(message.value[\"message\"][\"request\"])\n )\n mq.put(\n validate_order(message.value[\"message\"])\n )\n except Exception as e:\n logging.fatal(e, exc_info=True)\n # Post routine\n\n consumer.close()\n return", "def consume(self, timeout=None):\n\n def _raise_timeout(exc):\n raise driver_common.Timeout(str(exc))\n\n timer = driver_common.DecayingTimer(duration=timeout)\n timer.start()\n\n poll_timeout = (self.consumer_timeout if timeout is None\n else min(timeout, self.consumer_timeout))\n\n while True:\n if self._consume_loop_stopped:\n return\n try:\n return self._poll_messages(poll_timeout)\n except kafka.errors.ConsumerTimeout as exc:\n poll_timeout = timer.check_return(\n _raise_timeout, exc, maximum=self.consumer_timeout)\n except Exception:\n LOG.exception(_LE(\"Failed to consume messages\"))\n return", "def publish_message(self):\n\n message_count = 0\n while message_count < self._messages:\n message_count += 1\n message_body = \"task number %i\" %(message_count)\n self._channel.basic_publish(exchange='',\n routing_key=self._queue_name,\n body=message_body,\n properties=pika.BasicProperties(\n delivery_mode=2 # make message persistant\n ))\n print(\"Published message %i\" %(message_count))\n time.sleep(self._message_interval)", "def _mp_consume(client, group, topic, queue, size, events, **consumer_options):\n\n # Initial interval for retries in seconds.\n interval = 1\n while not events.exit.is_set():\n try:\n # Make the child processes open separate socket connections\n client.reinit()\n\n # We will start consumers without auto-commit. Auto-commit will be\n # done by the master controller process.\n consumer = SimpleConsumer(client, group, topic,\n auto_commit=False,\n auto_commit_every_n=None,\n auto_commit_every_t=None,\n **consumer_options)\n\n # Ensure that the consumer provides the partition information\n consumer.provide_partition_info()\n\n while True:\n # Wait till the controller indicates us to start consumption\n events.start.wait()\n\n # If we are asked to quit, do so\n if events.exit.is_set():\n break\n\n # Consume messages and add them to the queue. 
If the controller\n # indicates a specific number of messages, follow that advice\n count = 0\n\n message = consumer.get_message()\n if message:\n while True:\n try:\n queue.put(message, timeout=FULL_QUEUE_WAIT_TIME_SECONDS)\n break\n except queue.Full:\n if events.exit.is_set():\n break\n\n count += 1\n\n # We have reached the required size. The controller might have\n # more than what he needs. Wait for a while.\n # Without this logic, it is possible that we run into a big\n # loop consuming all available messages before the controller\n # can reset the 'start' event\n if count == size.value:\n events.pause.wait()\n\n else:\n # In case we did not receive any message, give up the CPU for\n # a while before we try again\n time.sleep(NO_MESSAGES_WAIT_TIME_SECONDS)\n\n consumer.stop()\n\n except KafkaError as e:\n # Retry with exponential backoff\n log.error(\n \"Problem communicating with Kafka (%s), retrying in %d seconds...\" % (e, interval))\n time.sleep(interval)\n interval = interval * 2 if interval * 2 < MAX_BACKOFF_SECONDS else MAX_BACKOFF_SECONDS", "def get_from_queue(self):\n while not self.receive_queue.empty():\n cmd, kwargs = bcp.decode_command_string(\n self.receive_queue.get(False))\n self._process_command(cmd, **kwargs)", "def _manager_main(self, queue):\r\n for task in self._task_generator():\r\n queue.put(task)", "def consume(self, handler):\n bounded_handler = partial(handler, self)\n self._consume_handler = handler\n self.log.debug(\"Start consuming\")\n self._channel.add_on_close_callback(\n self.on_channel_closed\n )\n self._consumer_tag = self._channel.basic_consume(bounded_handler,\n self.name)\n self.log.debug(\"Consumer tag %s on CHANNEL%i\",\n self._consumer_tag, self._channel.channel_number)", "def receive_subscription_message(\n self,\n topic_name: str,\n subscription_name: str,\n max_message_count: int | None,\n max_wait_time: float | None,\n ):\n if subscription_name is None:\n raise TypeError(\"Subscription name cannot be None.\")\n if topic_name is None:\n raise TypeError(\"Topic name cannot be None.\")\n with self.get_conn() as service_bus_client, service_bus_client.get_subscription_receiver(\n topic_name, subscription_name\n ) as subscription_receiver, subscription_receiver:\n received_msgs = subscription_receiver.receive_messages(\n max_message_count=max_message_count, max_wait_time=max_wait_time\n )\n for msg in received_msgs:\n self.log.info(msg)\n subscription_receiver.complete_message(msg)", "def my_consumer(q):\n while True:\n data = q.get()\n print('data found to be processed: {}'.format(data))\n processed = data * 2\n print(processed)\n\n if data is sentinel:\n break", "def get_generator(self):\n while self._is_running():\n yield self._queue.get()", "def get_generator(self):\n while self._is_running():\n yield self._queue.get()", "def send_messages(self, partition, *msg):\n if self.async:\n for m in msg:\n self.queue.put((partition, create_message(m)))\n resp = []\n else:\n messages = [create_message(m) for m in msg]\n req = ProduceRequest(self.topic, partition, messages)\n try:\n resp = self.client.send_produce_request([req], acks=self.req_acks,\n timeout=self.ack_timeout)\n except Exception as e:\n log.exception(\"Unable to send messages\")\n raise e\n return resp", "def on_next(self, msg):\n # publish the message to the topics\n retain = msg.retain if hasattr(msg, 'retain') else False\n for (topic, qos) in self.topics:\n self.client.publish(topic, msg, qos, retain)", "def stream(\n consumer: Consumer,\n count: Optional[int] = None,\n 
poll_timeout: Optional[float] = DEFAULT_POLL_TIMEOUT,\n timeout: Optional[float] = None,\n) -> Iterator[Message]:\n if count is not None and count <= 0:\n raise ValueError(\"count must be a positive integer.\")\n if poll_timeout is not None and poll_timeout <= 0:\n raise ValueError(\"poll_timeout must be a positive float.\")\n if timeout is not None and timeout <= 0:\n raise ValueError(\"timeout must be a positive float.\")\n\n if count is None:\n LOGGER.debug(\"Streaming messages....\")\n else:\n LOGGER.debug(\"Streaming up to %d messages....\", count)\n\n if timeout is not None:\n timeout_delta = timedelta(milliseconds=int(timeout * MILLIS_IN_SECOND))\n\n stream_duration = timedelta()\n num_messages = 0\n while count is None or num_messages < count:\n if musekafka.shutdown.is_shutting_down():\n break\n\n if timeout is not None:\n if stream_duration >= timeout_delta:\n LOGGER.debug(\"Hit stream timeout (%.3f seconds).\", timeout)\n break\n poll_timeout = min(\n poll_timeout or timeout, (timeout_delta - stream_duration).total_seconds()\n )\n\n stream_start = datetime.utcnow()\n try:\n message = poll(consumer, timeout=poll_timeout)\n except TimeoutError as e:\n LOGGER.debug(str(e))\n continue\n finally:\n stream_duration += datetime.utcnow() - stream_start\n with bind_message_ctx(message):\n yield message\n num_messages += 1\n LOGGER.debug(\"Completed streaming %d messages.\", num_messages)", "def handle_message_queue(self):\n # Add some dummy messages to the queue\n message = ['dummy1', 'dummy2', 'dummy3']\n for message in messages:\n self.queue.put(message)\n\n time.sleep(1)\n # Send kill event\n self.dut.kill.set()\n\n time.sleep(1)\n\n assertTrue(self.dut.queue_empty.is_set())\n # _process_message_queue is done\n\n # Check that dummy messages are in server socket queue\n for message in messages:\n size, received = self.get_message_from_queue()\n\n self.assertIsNotNone(received)\n if received is None:\n return\n\n self.assertIsNotNone(size)\n if size is None:\n return\n\n fail_size = 'Message length {} Received length {}'.format(len(message),\n size)\n self.assertEqual(size, len(message), msg=fail_size)\n\n fail_contents = 'Message: ({}) Received: ({})'.format(message, received)\n self.assertEqual(message, received, msg=fail_contents)", "def consume(self, queue_name, prefetch=1, timeout=5000):\n return _RedisConsumer(self, queue_name, prefetch, timeout)", "def dispatch_messages(sock, queue, channel):\n while run:\n try:\n message = queue.get()\n except Queue.Empty:\n pass\n else:\n if message.recipient is None:\n message.recipient = channel\n sock.send(\"{0}\\r\\n\".format(message.msg()))\n logging.debug(\"{0}\".format(message.msg()))\n queue.task_done()", "def run(self):\n while self._msg_queue:\n actor, msg = self._msg_queue.popleft()\n try:\n actor.send(msg)\n except StopIteration:\n pass" ]
[ "0.70663434", "0.70589", "0.6912972", "0.6792425", "0.67885274", "0.67721105", "0.67194426", "0.6707331", "0.6677362", "0.66220254", "0.6612172", "0.65917623", "0.64961404", "0.64757645", "0.6447494", "0.64265627", "0.6415559", "0.63817006", "0.6365126", "0.6331941", "0.63177645", "0.63063306", "0.6303739", "0.62995696", "0.6207276", "0.6170381", "0.6149413", "0.6095829", "0.60945505", "0.6089293", "0.6071981", "0.6060776", "0.6056135", "0.6046677", "0.6033786", "0.6007059", "0.5969255", "0.59342057", "0.5854395", "0.5840249", "0.5819531", "0.5808306", "0.57621056", "0.5756976", "0.572384", "0.572355", "0.5718922", "0.5714368", "0.57066655", "0.5695783", "0.5674014", "0.56631935", "0.56584674", "0.56578153", "0.5648602", "0.5642198", "0.5639515", "0.56377244", "0.56354415", "0.56291807", "0.56259066", "0.56250894", "0.56171787", "0.5611864", "0.5606485", "0.5598508", "0.55652076", "0.5561153", "0.55557376", "0.5543437", "0.5542648", "0.5537696", "0.5537431", "0.55355144", "0.5530563", "0.5516677", "0.5504964", "0.54867345", "0.54853487", "0.5481342", "0.54690933", "0.54627544", "0.5457404", "0.5456178", "0.5454842", "0.54531175", "0.54422075", "0.54407424", "0.54396", "0.54382414", "0.5431394", "0.54276377", "0.54276377", "0.54077744", "0.5407601", "0.54011846", "0.5398748", "0.5393725", "0.53857714", "0.538504" ]
0.6136618
27
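The negative snippets in the record closed above lean on one recurring error-handling idea: when the broker call fails, sleep and retry with a delay that doubles up to a fixed cap. A minimal, self-contained sketch of that capped-exponential-backoff loop follows; the `fetch` callable, the cap value, and all names here are illustrative assumptions, not taken from any snippet above.

```python
import time

MAX_BACKOFF_SECONDS = 300  # assumed cap; the snippet above doubles the interval up to such a cap


def consume_with_backoff(fetch, initial_interval=1.0):
    """Yield results of fetch() forever; on error, sleep and double the delay up to the cap."""
    interval = initial_interval
    while True:
        try:
            item = fetch()               # e.g. one consumer poll; `fetch` is an illustrative placeholder
            interval = initial_interval  # reset the backoff after a successful call
            yield item
        except Exception as exc:         # the snippet above catches KafkaError specifically
            print("fetch failed (%s), retrying in %.0f seconds" % (exc, interval))
            time.sleep(interval)
            interval = min(interval * 2, MAX_BACKOFF_SECONDS)
```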
Publish new message into queue. queue: Queue name. message: Message data. ttl: How long the message should stay alive.
def publish(self, queue, message, ttl=3600): # Get next message ID message_id = self.redis.incr(self._ns_nextid()) # Push message to queue self.redis.setex(self._ns_message(queue, message_id), ttl, message) # List all consumers of given queue consumers = self.redis.smembers(self._ns_subscriptions(queue)) # Publish the message to all the consumers. for consumer in consumers: self.redis.rpush(self._ns_queue(queue, consumer), message_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def publish(self, queue, message):\n\n # Instead of passing a queue to the constructor, the publish checks if\n # the target queue exists. If not, it declares the target queue\n if not self.queue:\n self.channel.queue_declare(queue=queue)\n self.queue = queue\n\n self.channel.basic_publish(\n exchange='', routing_key=queue, body=message)", "def publish(self, queue, message):\n # 1. Setup the channel to use to publish message\n channel_handler = ChannelHandler(self._connection)\n\n # 2. Open the channel before using it\n channel_handler.open_channel()\n\n # 3. Send the message via the channel\n channel_handler.send_message(self._exchange_name, queue, message)\n\n # 4. Close the channel after publishing the message\n channel_handler.close_channel()\n LOGGER.info('Bellow message `%s` is published in `%s`', message, queue)", "def test_queue_publish(self):\n self.queue_publisher._connect()\n with self.assertLogs(level='INFO') as cm:\n result = self.queue_publisher.publish_message(test_data['valid'])\n self.assertEqual(True, result)\n\n self.assertIn('Published message to queue', cm.output[8])", "def push(message: str, date: datetime.datetime):\n msg_id = str(uuid.uuid4())\n pipeline = connection.pipeline()\n pipeline.set(msg_id, message)\n pipeline.zadd(QUEUE_KEY, {\n msg_id: date.timestamp()\n })\n pipeline.execute()\n logger.info(f'Save a new future email: [message: {message}, date: {date}]')", "def publish_message(self):\n\n message_count = 0\n while message_count < self._messages:\n message_count += 1\n message_body = \"task number %i\" %(message_count)\n self._channel.basic_publish(exchange='',\n routing_key=self._queue_name,\n body=message_body,\n properties=pika.BasicProperties(\n delivery_mode=2 # make message persistant\n ))\n print(\"Published message %i\" %(message_count))\n time.sleep(self._message_interval)", "def publish(self, message_body, routing_key, exchange=None):\n\n publish_exchange = exchange or self.producer.exchange\n\n self.producer.publish(\n body=message_body,\n exchange=publish_exchange,\n routing_key=routing_key,\n retry=settings.PUBLISH_RETRY,\n retry_policy={\n # First retry immediately,\n 'interval_start': settings.PUBLISH_RETRY_INTERVAL_START,\n # then increase by 2s for every retry.\n 'interval_step': settings.PUBLISH_RETRY_INTERVAL_STEP,\n # but don't exceed 30s between retries.\n 'interval_max': settings.PUBLISH_RETRY_INTERVAL_MAX,\n # give up after 30 tries.\n 'max_retries': settings.PUBLISH_RETRY_MAX_RETRIES,\n # callback for logging\n 'errback': self.on_publish_error,\n 'on_revive': self.on_connection_revival\n },\n # declare exchange and queue and bind them\n declare=list(self.queues.values())) # queues is a dict.\n log.info(f'Published '\n f'message: {self.producer.exchange.name}::{routing_key}')\n log.debug(f'Published '\n f'message_body: {message_body}')", "def new_task(data):\n rabbit_host = os.getenv('RABBIT_HOST', 'localhost')\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(rabbit_host)\n )\n channel = connection.channel()\n channel.basic_publish(\n exchange='',\n routing_key='task_queue',\n body=json.dumps(data),\n properties=pika.BasicProperties(\n delivery_mode=2, # make message persistent\n )\n )\n connection.close()", "def _publish(self, topic_name, message):\n msg = {\n 'op': 'publish',\n 'topic': topic_name,\n 'msg': message\n }\n json_msg = json.dumps(msg)\n self.ws.send(json_msg)", "def publish( self, topic, data, qos = 1, retain = False ):\n logging.info( \"Publishing to topic %s\" %topic )\n self.client.publish( 
topic, data, qos = qos, retain = retain )", "def publish(self, message):\n logger.info(\"Publishing to topic [{0}]: {1}\".format(self._topic_name, message))\n self._executor.send(json.dumps({\n 'op': 'publish',\n 'id': 'publish:{0}:{1}'.format(self._topic_name, self._id),\n 'topic': self._topic_name,\n 'msg': message\n }))", "def test_publish1(self):\n publish = self.queue.publish(TEST_QUEUE, 'this is a test msg')\n assert publish", "def process(self, message):\n if self.debug:\n self.log(\"Publishing: \" + str(message.data))\n self.channel.basic.publish(\n AMQPMessage(str(message.data)),\n self.exchange, self.routing_key)", "def process(self, message):\n if self.debug:\n self.log(\"Publishing: \" + str(message.data))\n self.channel.basic.publish(\n AMQPMessage(str(message.data)),\n self.exchange, self.routing_key)", "def publish(self, message: str) -> None:", "def test_message_queue_preserves_time_data(self):\n today = date.today()\n now = datetime.now()\n body = {'event_name': 'job.created', 'date': today, 'timestamp': now}\n unbound_message = SQSMessage(self.schema, body=body)\n\n queue_message = self.create_message(json.dumps(unbound_message.body))\n\n message = SQSMessage(self.schema, message=queue_message)\n\n assert isinstance(message, SQSMessage)\n assert message.body['event_name'] == 'job.created'\n assert isinstance(message.body['date'], date)\n assert isinstance(message.body['timestamp'], datetime)\n assert message.body['date'] == today\n assert message.body['timestamp'] == now", "def put_message(cls, message):\n rp = cls.get()\n rp.queue_receive.put(message)", "def publish(topic, message):\n if DEBUG:\n print(\"Publish: '\" + message + \"' (topic: '\" + topic + \"')\")\n DATA[\"client\"].publish(topic, message)", "def message(cls, user, message, context):\r\n q.enqueue(new_message_worker, args=(user, message, context), result_ttl=0)\r\n pass", "def enqueue_message(self, item: MessageQueueItem):\n heapq.heappush(self._message_queue, item)", "def publish(self, data, isAsync = True):\n time = now()\n dataWithId = (self.idGenerator.generateId(), data)\n self.messageQueue.setdefault(time, []).append(dataWithId)\n self.notify(time, dataWithId, isAsync)", "def check_and_send_message_to_queue(queue_url, str_message):\n msg_str, msg_sent_timestamp, receipt_handle = lib.get_from_sqs_queue(queue_url, 20, 5)\n\n if not msg_str:\n logger.warning('Unable to retrieve message during this cycle.')\n return \n msg_data = json.loads(msg_str)\n \n msg_ts = float(msg_sent_timestamp) * 0.001\n logger.info('Message from queue: {}'.format(msg_data))\n current_time = time.time()\n\n logger.info('msg ts: {} current ts: {}'.format(msg_ts, current_time))\n\n if (current_time - msg_ts) > 259200:\n logger.info('Message in queue needs to be updated')\n lib.send_message_to_queue(queue_url, str_message)\n lib.delete_message_from_queue(queue_url, receipt_handle) \n else:\n logger.info('Message in queue is still current.')", "def send_message(self, message):\n self.client.queue.put(message)", "def publish(self, name, data, timeout=None):\n\n message = Message(name, data)\n\n if self.encrypted:\n message.encrypt(self.__cipher)\n\n if self.ably.options.use_text_protocol:\n request_body = message.as_json()\n else:\n request_body = message.as_thrift()\n\n path = '/channels/%s/publish' % self.__name\n headers = HttpUtils.default_post_headers(not self.ably.options.use_text_protocol)\n return self.ably.http.post(\n path,\n headers=headers,\n body=request_body,\n timeout=timeout\n ).json()", "async def 
publish_message(self, body: str, priority: int = None):\n message = Message(body=body.encode('utf-8'), priority=priority, delivery_mode=DeliveryMode.PERSISTENT)\n await self._channel.default_exchange.publish(message, routing_key=self._queue)", "def add_to_queue(self, msg):\n if not self.queue.full():\n self.queue.put(msg)", "def create_queue(self, queue_name='', exclusive=True, queue_size=10,\n message_ttl=60000, overflow_behaviour='drop-head',\n expires=600000):\n args = {\n 'x-max-length': queue_size,\n 'x-overflow': overflow_behaviour,\n 'x-message-ttl': message_ttl,\n 'x-expires': expires\n }\n\n result = self._channel.queue_declare(\n exclusive=exclusive,\n queue=queue_name,\n durable=False,\n auto_delete=True,\n arguments=args)\n queue_name = result.method.queue\n self.logger.debug('Created queue [{}] [size={}, ttl={}]'.format(\n queue_name, queue_size, message_ttl))\n return queue_name", "def publish(self, message: str, message_id: int) -> None:\n payload: str = self._create_payload(message, message_id)\n max_payload_bytes = 268435455\n if size(payload) > max_payload_bytes:\n msg = Message.status_message('Message too large.')\n self.client.queue.put(msg)\n return\n return_value: mqtt.MQTTMessageInfo = self.client.publish(self.client.topic, payload, qos=2)\n if return_value.rc == 0: # Publication successful\n return\n else:\n raise SubscriptionError(f'MQTTMessageInfo error code: {return_value.rc}')", "def publish_message(message: str, broker_ip: str, exchange_name: str, exchange_type: str):\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(host=broker_ip))\n channel = connection.channel()\n channel.exchange_declare(exchange=exchange_name, exchange_type=exchange_type, durable=True)\n channel.basic_publish(exchange=exchange_name, routing_key='', body=message)\n print(f'Published {message} to the exchange')\n connection.close()", "def message(cls, user, message, context):\n q.enqueue(foo, args=(user, message, context), result_ttl=0)\n pass", "def _put_new_message_in_queue(self, message):\n message_type = message.TYPE_STRING\n self.messages[message_type].put(message)", "def add_message(self, msg):\n msg_string = json.dumps(msg)\n self.redis_client.publish(self.message_channel, msg_string)\n self.redis_client.lpush(self.message_list, msg_string)\n self.redis_client.ltrim(self.message_list, 0,\n app.config[\"MAX_MESSAGES\"]-1)", "def queueMessage(self, jid, timestamp, name, content):\n\t\tprint jid, timestamp, content\n\t\tstamp = datetime.datetime.fromtimestamp(timestamp)\n\t\tself.queue.put((stamp, ACTION_CHAT, (name, content)))", "def publish(self, message: None):\n response = self.client.publish(TopicArn=self.params['topic_arn'], Message=message)\n return response", "def test_publish3(self):\n self.queue.direct_declare(TEST_QUEUE)\n\n with self.assertRaises(TypeError):\n self.queue.publish(TEST_QUEUE, {'test': \"this is a test msg\"})", "def kafka_publish_message(self, message):\n self.kf_sender = self.kf_producer.send(self.kf_topic, value=message.encode('utf-8'));", "def publish(self, data):\n # [START pubsub_quickstart_publisher]\n # [START pubsub_publish]\n # Data must be a bytestring\n logger.info(\"publishing message %s\" % data)\n data = data.encode('utf-8')\n self.publisher.publish(self.topic_path, data=data)\n\n logger.info('Published messages: {}'.format(data))\n # [END pubsub_quickstart_publisher]\n # [END pubsub_publish]", "def publish(self, data=None):\n rospy.loginfo(\"Message published on topic %s\", self.topic)", "def produce(self, message):\n 
self.producer.send(self.topic, message)", "def put_data(self, id, data):\n self.msg_queue.put(data)", "def create(\n queue_name: str,\n region: str = \"\",\n delay_seconds: int = 0,\n maximum_message_size: int = 262144,\n message_retention_period: int = 345600,\n visibility_timeout: int = 30,\n fifo: bool = False,\n receive_message_wait_time_seconds: int = 0,\n **additional_attributes\n) -> Queue:\n sqs_client = _client(region=region)\n new_queue_url = sqs_client.create_queue(\n QueueName=queue_name,\n Attributes=dict(\n DelaySeconds=str(delay_seconds),\n MaximumMessageSize=str(maximum_message_size),\n MessageRetentionPeriod=str(message_retention_period),\n ReceiveMessageWaitTimeSeconds=str(receive_message_wait_time_seconds),\n VisibilityTimeout=str(visibility_timeout),\n FifoQueue=str(fifo).lower(),\n **additional_attributes\n ),\n )\n if not new_queue_url:\n raise FailedToCreateQueue()\n return get(new_queue_url[\"QueueUrl\"].split(\"/\")[-1])", "async def _send_message_in_queue(self, queue_name, body, reply_to=None):\n message = aio_pika.Message(body=body, reply_to=reply_to)\n await self.channel.default_exchange.publish(message, routing_key=queue_name)", "def send_messages(host,port,p_message_queue, timeout = 20, VERBOSE = False,worker_num = 0): \n \n # open publisher socket\n context = zmq.Context()\n sock = context.socket(zmq.PUB)\n sock.bind(\"tcp://{}:{}\".format(host, port))\n time.sleep(3) # pause to allow subscribers to connect\n \n # sending loop\n prev_time = time.time()\n while time.time() - prev_time < timeout:\n try:\n message = p_message_queue.get(timeout = timeout)\n payload = pickle.dumps(message)\n #sock.send_string(topic)\n sock.send_pyobj(payload)\n prev_time = time.time()\n if VERBOSE: print(\"w{}: Sender thread sent message at {}\".format(worker_num,time.ctime(prev_time)))\n \n \n except queue.Empty:\n time.sleep(0.01)\n \n sock.close()\n context.term()\n print (\"w{}: Message sender thread closed socket.\".format(worker_num))", "def publish(self, topic, msg):\n\t\tself.topic = topic\n\t\tself.msg = msg \n\t\tself.client.publish(self.topic, self.msg)", "def publish(self, topic, msg):\n formatted_msg = json.dumps(msg)\n self.client.publish(topic, formatted_msg) # json converting cause of mqtt's data transfer limit.", "def test_durable_exchange_publish(self):\n self.durable_exchange_publisher._connect()\n with self.assertLogs(level='INFO') as cm:\n result = self.durable_exchange_publisher.publish_message(test_data['valid'])\n self.assertEqual(True, result)\n\n self.assertIn('Published message to exchange', cm.output[8])", "def publish_message(producer_instance, topic_name, key, value):\n key_serializer = repr(key).encode()\n value_serializer = repr(value).encode()\n\n producer_instance.send(topic_name, key=key_serializer, value=value_serializer)\n producer_instance.flush()\n print('Message published successfully.')", "def postMessage(self, queue_name, messages=[], project_id=None):\n if project_id is None:\n project_id = self.project_id\n url = \"%sprojects/%s/queues/%s/messages?oauth=%s\" % (self.url,\n project_id, queue_name, self.token)\n msgs = []\n for message in messages:\n if isinstance(message, basestring):\n msgs.append({\"body\": message})\n else:\n msgs.append(message)\n data = json.dumps({\"messages\": msgs})\n dataLen = len(data)\n\n s = self.__post(url=url, payload=data)\n\n ret = json.loads(s)\n return ret", "def publish(self, topic, value):\n msg = self.topics[topic]['msg']\n msg.data = value\n self.topics[topic]['publisher'].publish(msg)\n 
print(\"published \\t{} \\t{}\".format(topic, value))", "def msg(self, target, message):\n self.server.message_queue.put(('tests!tests@tes.t', target, message))", "def send_msg(self, my_queue, my_msg):", "def publish(node, payload, settings):\n entry = dict2node(payload)\n iq = build_iq(node, entry, settings)\n send_message(iq, settings)", "def _queue_create(self, **kwargs):\n name = self.generate_random_name()\n return self.clients(\"zaqar\").queue(name, **kwargs)", "def publish(self, message: model.MQTTMessage):\n self.client.publish(message.topic, payload=message.get_payload())", "def new_message(self, body=''):\r\n m = self.message_class(self, body)\r\n m.queue = self\r\n return m", "def publish_messages(message):\n\n publisher = pubsub_v1.PublisherClient()\n topic_path = publisher.topic_path(PROJECT, TOPIC)\n\n message = message.encode('utf-8')\n publisher.publish(topic_path, data=message)\n\n print('Message published\\n')", "def push(self, *args, **kwargs):\n self.queue.put((args, kwargs))", "def _ns_queue(self, queue, consumer_id):\n return self._ns(queue, consumer_id, \"messages\")", "def pub(payload):\n print(payload)\n sys.stdout.flush()\n\n corr_id = pub.send_request(payload)\n r.lpushx(\"payload\", payload)\n\n\n while pub.queue[corr_id] is None:\n time.sleep(0.1)\n\n return pub.queue[corr_id]", "def publish(self, data, properties):\n logger.debug(\"Publisher: Sending a message to MQ...\")\n rqueue = Queue(\n properties['reply_to'],\n Exchange(\n properties[\"replyToExchange\"], 'direct',\n durable=True, no_declare=self.no_declare),\n routing_key=properties['reply_to'],\n no_declare=self.no_declare\n )\n if properties.get(\"encode\", True):\n rsp_body = (base64.b64encode(data.encode('utf-8'))).decode()\n else:\n rsp_body = (base64.b64encode(data)).decode() # raw data\n rsp_msg = {\n 'id': properties.get('id', None),\n 'headers': {\n 'Content-Type': properties.get(\n \"Content-Type\", \"application/*+json;version=31.0\" # default\n ),\n 'Content-Length': len(data)\n },\n 'statusCode': properties.get(\"statusCode\", 200),\n 'body': rsp_body\n }\n try:\n self.connection.Producer().publish(\n rsp_msg,\n correlation_id=properties['correlation_id'],\n routing_key=rqueue.routing_key,\n exchange=rqueue.exchange,\n retry = True,\n expiration = 10000\n )\n logger.info(\"Publisher: Response sent to MQ\")\n except ConnectionResetError:\n logger.error(\"Publisher: ConnectionResetError: message may be not sent...\")", "def publish_message(self, topic, message):\n\n def delivery_report(err, msg):\n \"\"\" Called once for each message produced to indicate delivery result.\n Triggered by poll() or flush(). 
\"\"\"\n if err is not None:\n print('Message delivery failed: {}'.format(err))\n else:\n print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))\n\n # Trigger any available delivery report callbacks from previous produce() calls\n self.producer.poll(0)\n\n # Asynchronously produce a message, the delivery report callback\n # will be triggered from poll() above, or flush() below, when the message has\n # been successfully delivered or failed permanently.\n value_to_publish = message\n\n if self.handle_json_message_data:\n if type(message) not in (dict, list):\n raise MessageValueException(\"Your message should be json serializable!\")\n value_to_publish = json.dumps(value_to_publish)\n\n self.producer.produce(topic, value_to_publish.encode('utf8'), callback=delivery_report)\n\n # Wait for any outstanding messages to be delivered and delivery report\n # callbacks to be triggered.\n self.producer.flush()", "async def publish(self, body, routing_key=None):\n properties = pika.BasicProperties(\n app_id='example-publisher',\n content_type='application/json'\n )\n self.log.debug(\"Publish to %s:%s\", self.exchange,\n routing_key or self.routing_key)\n channel = await self._backend.channel('publish')\n try:\n channel.basic_publish(\n self.exchange,\n routing_key or self.routing_key or '',\n # pylint: disable=c-extension-no-member\n ujson.dumps(body, ensure_ascii=False),\n properties)\n except pika.exceptions.ChannelClosed: # pragma: no cover\n self.log.error(\n 'Message not delivered (%s): %s',\n routing_key, body\n )", "def _messages_post(self, queue, messages, min_msg_count, max_msg_count):\n with atomic.ActionTimer(self, \"zaqar.post_between_%s_and_%s_messages\" %\n (min_msg_count, max_msg_count)):\n queue.post(messages)", "def send_message(self, message=\"\", status=200, expiry=None):\n init()\n wrapper = {'status_code': status, 'message': message, \"id\": self.get_session_id()}\n if expiry is not None:\n wrapper['expiry'] = expiry\n\n payload = json.dumps(wrapper, default=date_handler)\n session_id = self.get_session_id()\n script = \"\"\"\n local queue = redis.call('hget', KEYS[3] .. 'active_sessions', KEYS[1])\n if queue == '' then\n redis.log(redis.LOG_DEBUG, \"BACKLOG : No Queue, pushing to backlog \" .. KEYS[1])\n redis.call(\"lpush\", KEYS[3] .. KEYS[1] .. \"_backlog\", KEYS[2])\n return \"BACKLOG\"\n elseif queue == nil then\n redis.log(redis.LOG_DEBUG, \"TIMEOUT : Timed out \" .. KEYS[1])\n return \"TIMEOUT\"\n end\n local result = redis.call(\"publish\", queue, KEYS[2])\n if result == 0 then\n redis.log(redis.LOG_DEBUG, \"BACKLOG_FAILURE : PUB Failed, saving to backlog \" .. KEYS[1])\n redis.call(\"lpush\", KEYS[3] .. KEYS[1] .. \"_backlog\", KEYS[2])\n return \"BACKLOG_FAILURE\"\n end\n redis.log(redis.LOG_DEBUG, \"SUCCESS : \" .. 
KEYS[1])\n return \"SUCCESS\"\n \"\"\"\n return r.eval(script, 3, session_id, payload, comet_config.REDIS_NAMESPACE)", "def send_message(msg, exchange, key=None):\n print(msg)\n connection = pika.BlockingConnection(pika.ConnectionParameters(host='rabbitmq'))\n channel = connection.channel()\n exchange_type = 'direct' if exchange == 'other' else 'topic'\n channel.exchange_declare(exchange=exchange, exchange_type=exchange_type)\n if key is not None and exchange == 'logs':\n routing_key = f'scheduler.{key}'\n else:\n routing_key = ''\n channel.basic_publish(exchange=exchange, routing_key=routing_key, body=msg)\n connection.close()", "def test_enqueue(self):\n dest = '/queue/foo'\n frame = Frame('MESSAGE', headers={'message-id': str(uuid.uuid4())}, body='some data')\n self.store.enqueue(dest, frame)\n \n assert self.store.has_frames(dest) == True\n assert self.store.size(dest) == 1", "def send_notification(data):\n red = Redis(dd.REDIS_HOST, int(dd.REDIS_PORT))\n red.publish(\"all\", ['publish', data])", "def send_notification(data):\n red = Redis(dd.REDIS_HOST, int(dd.REDIS_PORT))\n red.publish(\"all\", ['publish', data])", "def handle_outbound_message(self, message):\n key = self.r_key(self.redis_outbound_queue)\n self.r_server.rpush(key, message.to_json())", "def create_queue(self, queue_name, visibility_timeout=None):\r\n params = {'QueueName': queue_name}\r\n if visibility_timeout:\r\n params['DefaultVisibilityTimeout'] = '%d' % (visibility_timeout,)\r\n return self.get_object('CreateQueue', params, Queue)", "def on_message(message, data):\n if message[\"type\"] != \"error\":\n self.q.put(message[\"payload\"])", "def publish(self, message: Union[SubmissionMessage, CommentMessage]) -> int:\n self.publisher.publish(self.topic, message.serialize().encode(\"utf-8\")).result()", "def push_queue(self, url):\n self.sqs_client.send_message(\n QueueUrl=self.sqs_queue,\n MessageBody=url,\n )", "def zmq_qry_pub(context):\n app.logger.info(\"zmq_qry_pub started\")\n socket = context.socket(zmq.PUB)\n socket.connect('tcp://127.0.0.1:7000')\n\n timestamps = ['0810', '0811', '0812']\n idx = EquityIndex('CAC')\n\n # for ts in cycle(timestamps):\n for ts in timestamps:\n price_data = idx.components_last_px(ts)\n\n for topic, msg_data in price_data.iteritems():\n if msg_data:\n # push the code/ticker into the dict\n msg_data['ticker'] = topic\n # reformat with a colon\n msg_data['ts'] = ts[:2] + ':' + ts[2:]\n # and jsonify....\n msg = json.dumps(msg_data)\n socket.send(msg)\n\n gevent.sleep(WAIT)\n\n app.logger.info(\"zmq_qry_pub closed\")", "def pop():\n task = connection.zrange(QUEUE_KEY, 0, 0)\n if not task:\n return False, 'No emails now!'\n msg_id = task[0]\n timestamp = connection.zscore(QUEUE_KEY, msg_id)\n now = datetime.datetime.now().timestamp()\n if timestamp < now or abs(timestamp - now) <= 1e-6:\n message = connection.get(msg_id)\n pipeline = connection.pipeline()\n pipeline.zrem(QUEUE_KEY, msg_id)\n pipeline.delete(msg_id)\n pipeline.execute()\n return True, message\n return False, \"It's too early now!\"", "def send_ttl_expire(s, in_eth, in_ip, payload):\n saddr = ttl2ip(in_ip.ttl, in_ip.daddr)\n\n eth = ethhdr(in_eth.h_source, in_eth.h_dest, in_eth.h_proto)\n ip = iphdr(\n version=4,\n ihl=5,\n id=in_ip.id,\n ttl=64,\n protocol=1,\n saddr=saddr,\n daddr=in_ip.saddr,\n )\n icmp = icmphdr(11, 0, 0, 0, 0)\n\n ip.tot_len = len(ip) + len(icmp) + len(payload)\n ip.check = checksum(bytearray(ip))\n icmp.checksum = checksum(bytearray(icmp) + payload)\n\n msg = create_string_buffer(len(eth) + 
len(ip) + len(icmp) + len(payload))\n msg = bytearray(eth) + bytearray(ip) + bytearray(icmp) + payload\n\n print(\n \" %16s <- %16s ttl:%03d proto:%-3d icmp type:%-3d code:%-3d\"\n % (ip.daddr, ip.saddr, ip.ttl, ip.protocol, icmp.type, icmp.code)\n )\n s.send(msg)", "def publish(self, topic, payload):\n complete_topic = \"{}/{}\".format(self._base_topic, topic)\n self._client.publish(complete_topic, payload, qos=2)\n logger.info(\"On topic %s published: %s\", complete_topic, payload)", "def writeMessageQueue(self, message, mType):\n\n self.messageQueue.put((message, mType))\n\n return True", "def ztest_tokyo_queue(self):\n \n sql_queue = TokyoCabinetQueue()\n \n print(\"Queue size = %d\\n\" %(sql_queue.size()) )\n \n #insertion\n for i in range(10):\n if i % 2 == 0:\n p = 0\n else:\n p = 1\n item = NMSQueueItem(p,\"data %s\" % (i))\n item.set_uuid()\n sql_queue.put(item.dictify())\n #time.sleep(0.5)\n \n size = sql_queue.size()\n \n while size != 0:\n the_dict = sql_queue.pop()\n item = NMSQueueItem.create_from_dict(the_dict)\n print(\"size = %d, item = %s\\n\" % (size, item))\n size = sql_queue.size()\n \n print(\"size = %s\" % size )", "def call(\n self,\n target_queue: str,\n message: Optional[Any] = None,\n timeout: int = 5000) -> Any:\n self.response = None\n self.correlation_id = str(uuid.uuid4())\n message_props = pika.BasicProperties(\n reply_to=self.callback_queue,\n correlation_id=self.correlation_id)\n\n message_as_dict = {\n 'data': message,\n }\n\n print(f'Sending message {message}')\n\n self.channel.basic_publish(\n exchange='',\n routing_key=target_queue,\n properties=message_props,\n body=gzip.compress(json.dumps(message_as_dict).encode('UTF8')))\n start_time = time.time()\n\n print('Message sent, waiting for response...')\n\n while self.response is None:\n if (start_time + timeout) < time.time():\n raise ResponseTimeout()\n\n self.connection.process_data_events(time_limit=timeout)\n\n # NOTE: mypy incorrectly thinks this statement is unreachable\n # what it doesn't know is that connection.process_data_events()\n # will call _on_response, setting self.response when a response\n # is received on the callback queue defined in __init__\n return self.response # type: ignore", "def sendMessage(topic, data, key, producer):\n producer.poll(0)\n producer.produce(topic, data.encode('utf-8'), key, callback=delivery_report)\n producer.flush()", "def publish(self, node, topic, data={}, on_publish=None, on_response=None):\n pass", "def publish(self, message):\n pika_message = message.to_pika_message()\n self._channel.basic_publish(exchange='',\n routing_key=self.name,\n properties=pika_message.properties,\n body=message.body)", "def publish(self, message, topic=''):\n if type(message) != types.ListType:\n message = [message]\n if topic:\n message = [topic] + message\n self.send(message)", "def publish(\n hass: HomeAssistant,\n topic: str,\n payload: PublishPayloadType,\n qos: int | None = 0,\n retain: bool | None = False,\n encoding: str | None = DEFAULT_ENCODING,\n) -> None:\n hass.add_job(async_publish, hass, topic, payload, qos, retain, encoding)", "def _put(self, item, queue):", "def listener_callback(self, topic, msg):\n netMessage = SocketMessage(mType=MessageType.MESSAGE, mTopic=topic, mPayload=msg)\n item = PrioritizedItem(priority=self.topic_priorities[topic], item=netMessage)\n\n try:\n self.message_queue.put_nowait(item)\n except queue.Full as ex:\n ## TODO handle queue full issue - shouldn't hit this too often, we either need more workers or too much data is being sent\n # 
self.get_logger().error(f'Queue is full! {str(ex)}')\n self.metric_handler.increment_dropped()\n except Exception as ex:\n # some other error\n self.get_logger().error(f'Error queuing message {str(ex)}')", "def ztest_sql_queue(self):\n \n sql_queue = SQLQueue()\n \n #insertion\n for i in range(10):\n item = NMSQueueItem(5,\"data %s\" % (i))\n item.set_uuid()\n sql_queue.put(item.dictify())\n \n size = sql_queue.size()\n \n while size != 0:\n the_dict = sql_queue.pop()\n item = NMSQueueItem.create_from_dict(the_dict)\n print(\"size = %d, item = %s\\n\" % (size, item))\n size = sql_queue.size()\n \n print(\"size = %s\" % size )", "def creator(data, q):\n print('Creating data and putting it on the queue')\n for item in data:\n q.put(item)", "def pushMsg(self, msg, delay=0):\n self.pre_queue.append([msg, delay])", "def process_message(self, message):\n self.post_to_redis(message)\n return", "def publish_data(data):\n redis_db.publish(DATA_CHANNEL, json.dumps(data))", "async def publish(self, body, routing_key=None):\n pass # pragma: no cover", "def put(self, message):\r\n stamp = int(message[\"stamp\"]) / 1000000.0\r\n\r\n # sort it into the existing waiting messages\r\n self.lock.acquire()\r\n bisect.insort(self.queue, (stamp, time.time(), message))\r\n self.lock.release()", "def publishEvent(eventName,publisher, msg):", "def test_init_with_valid_queue_message(self):\n body = json.dumps({'event_name': 'job.created', 'foo': 'bar'})\n queue_message = self.create_message(body)\n\n message = SQSMessage(self.schema, message=queue_message)\n\n assert isinstance(message, SQSMessage)\n assert message.body == {'event_name': 'job.created'}", "def send(self, msg):\n self.house.PLM.send_queue.put( msg )", "def send_message(data):\n if data is not None:\n logging.debug(data)\n queue.on_next(data)", "async def publish(self, message):\n try:\n self.write('data: {}\\n\\n'.format(message))\n await self.flush()\n except StreamClosedError:\n self.finished = True", "def pushing_message(project_id: str, topic_id: str, message: str):\n publisher = pubsub_v1.PublisherClient()\n # The `topic_path` method creates a fully qualified identifier\n # in the form `projects/{project_id}/topics/{topic_id}`\n topic_path = publisher.topic_path(project_id, topic_id)\n\n # Data must be a bytestring\n message = message.encode(\"utf-8\")\n # When you publish a message, the client returns a future.\n publisher.publish(topic_path, message)\n print(f\"Published messages to {topic_path}.\")", "def write_message(self, payload):\n self.messages.append(payload)" ]
[ "0.66769063", "0.6397617", "0.627912", "0.61567104", "0.60542965", "0.5991828", "0.5984815", "0.59224325", "0.59119457", "0.5885555", "0.58437407", "0.5831145", "0.5831145", "0.57749146", "0.5731569", "0.572015", "0.57037824", "0.5698528", "0.5670229", "0.56518567", "0.56388974", "0.56305766", "0.56260747", "0.5615777", "0.5605588", "0.5598982", "0.55891937", "0.55742264", "0.55735886", "0.55686784", "0.55659276", "0.5540241", "0.5523965", "0.55217576", "0.55129856", "0.55087054", "0.5495036", "0.5472409", "0.54522985", "0.5437643", "0.5426847", "0.54213375", "0.5408476", "0.5399568", "0.53987175", "0.53564453", "0.53404206", "0.533707", "0.53342927", "0.5329212", "0.53215957", "0.5314089", "0.5298303", "0.5295893", "0.5288077", "0.5282113", "0.527867", "0.5275316", "0.52726465", "0.5253649", "0.52523327", "0.5248477", "0.5244622", "0.5240129", "0.52141505", "0.5208535", "0.5208535", "0.52024007", "0.5193665", "0.51904047", "0.5189084", "0.5188273", "0.51735485", "0.51612455", "0.51527673", "0.5152372", "0.5142865", "0.51427174", "0.5138148", "0.51377666", "0.51362365", "0.51352954", "0.5134982", "0.5128516", "0.5121293", "0.5117866", "0.5116693", "0.51108885", "0.5107502", "0.5088237", "0.5087974", "0.5079222", "0.5068086", "0.50675917", "0.5062468", "0.5054909", "0.50540185", "0.5051917", "0.5034522", "0.5032253" ]
0.81558275
0
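The document in the record above implements fan-out publishing on top of plain Redis commands: INCR allocates a message ID, SETEX stores the payload with the requested TTL, and the ID is RPUSHed onto one list per subscribed consumer. A consumer-side counterpart written directly against redis-py is sketched below; the namespace value and the per-message key layout are assumptions chosen to mirror the helpers quoted in these records, not code from the dataset.

```python
import redis

r = redis.Redis()
NAMESPACE = "mq"  # assumed prefix, standing in for the `namespace` attribute used by _ns in these records


def consume(queue, consumer_id, timeout=5):
    """Block for the next message ID on this consumer's list, then load its payload."""
    popped = r.blpop("%s.%s.%s.messages" % (NAMESPACE, queue, consumer_id), timeout=timeout)
    if popped is None:
        return None                      # nothing was published within the timeout
    _, message_id = popped
    # Assumed per-message key layout (namespace.queue.id), mirroring the setex call in the document above.
    payload = r.get("%s.%s.%s" % (NAMESPACE, queue, message_id.decode()))
    return payload                       # None if the message's TTL has already expired
```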
Convenience method to retrieve names of Redis keys including the configured namespace.
def _ns(self, *args): return "%s.%s" % (self.namespace, ".".join([str(arg) for arg in args]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def keys(self, redis_key: str):\n for k in self.client.keys(pattern=\"{}*\".format(redis_key)):\n deserialized_key = k.decode('utf-8')\n print(deserialized_key)", "def get_keys(self):\r\n\t\tlogger.debug(\"Getting the keys\")\r\n\t\t\r\n\t\treturn db.get_items('keys')", "async def keys(self) -> Iterable[str]:", "def get_keys(self):\r\n return self._keys", "def Keys(self) -> NameObjectCollectionBase.KeysCollection:", "def keys(self) -> List[str]:\n raise NotImplementedError", "def ikeys(self, prefix=''):", "def keys(self) -> Sequence[str]:\n raise NotImplementedError", "def AllKeys(self) -> _n_0_t_1[str]:", "def get_keys(self) -> Dict[str, str]:\n return self.http.get(self.config.paths.keys)", "def keys(self, pattern=\"*\"):\n lenOfPrefix = len(self.appendKeys(\"\"))\n return [key[lenOfPrefix:] for key in\n self.redis.keys(self.appendKeys(pattern))]", "async def get_keys(self):\n return self.dict.keys()", "def get_keys(self):\n bucket = self.resource.Bucket(self.bucketname)\n return [key.key for key in bucket.objects.all()]", "def _getNames(self):\n return self._items.keys()", "def keys(self) -> List:\n pass", "def get_registry_keys( ):\n return _theRegistry.get_codes().keys()", "def list_keys(self, bucket_name, prefix=None):\n url = self.endpoint + '/rest/v2/caches/' + self.cache_name + '?action=keys'\n res = self.infinispan_client.get(url, auth=self.basicAuth)\n data = res.content\n return data", "def keys():", "def getkeys(self):\n return list(self.keys)", "async def get_cache_names(self) -> list:\n conn = await self.random_node()\n return await cache_get_names_async(conn)", "def keys(self):\n tuples = self._execute(\"SELECT name FROM users\")\n ret = [tup[0] for tup in tuples]\n return ret", "def get_all_keys(self, headers=None, **params):\r\n return self._get_all([('Contents', self.key_class),\r\n ('CommonPrefixes', Prefix)],\r\n '', headers, **params)", "def keys(self) -> List[str]:\n return self.__stash.keys()", "def list_all_keys(self):\n \n return self.keys", "def keys(self):\n list_all_dict = self.list_all()\n return list_all_dict[\"nodes\"] + list_all_dict[\"groups\"]", "def keys(self):\n return self.get_list(self.cloudman.list_keypairs(),\n kind=\"key\")", "def names(self):\n return [x for x in self._dict.keys()]", "def keys(self):\n return _keys(self)", "def keys(self):\n return _keys(self)", "def all():\n # results = [String.from_dict(redis.hgetall(key)) for key in redis.keys() if key != 'index']\n results = []\n for key in redis_store.keys(String.generate_key('*')):\n data = pickle.loads(redis_store.get(key))\n string = String(data['key']).deserialize(data)\n results.append(string)\n return results", "def keys(self, installer_context):\n return self.spec.keys(self.data, installer_context)", "def return_keys(self):\r\n\r\n keys = list(self.piDD.keys())\r\n return keys", "def keys(self):\n return", "def keys(self):\r\n return [k for k in self]", "def __call__(self):\n return self._main._keys()", "def keys(self):\n return self.keys", "async def get_keys(self, collection):\n raise NotImplementedError", "def get_containers_names(client, prefix=DOCK_CONTAINER_NAME_PREFIX):\n\n return [str(container.name) for container in client.containers.list(\"all\") if prefix in container.name]", "def keys(self) -> t.List[str]: # type: ignore[override]\n return list(self.__keys)", "def get_setting_keys(self):\n return self.do_rpc(\"get_setting_keys\")", "def rawkeys(self):\n k = {}\n for key, names in self.search_names.items():\n for name in names:\n if name in self.channel_names:\n k[key] = 
name\n break\n\n assert 'Current' in k\n assert 'Bias' in k\n assert self.LI in k\n\n k['LI'] = k[self.LI]\n\n return SimpleNamespace(**k)", "def _getnamespaces(cls):\n return \" \".join(Kmlable._namespaces)", "def allkeys(self, as_str=False):\n for key in self.__allkeys((\"__ROOT__\",), {\"__ROOT__\": self}):\n yield \".\".join(key) if as_str else key", "def namespaced_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NamespacedNameArgs']]]]:\n return pulumi.get(self, \"namespaced_names\")", "def keys(self):\n return self.config.keys()", "def keys(self):\n return self.config.keys()", "def all(cls, connection=None):\n prefix = cls.redis_queue_namespace_prefix\n \n if connection is None:\n connection = RedisMixin.redis_conn\n def to_queue(queue_key):\n return cls.from_queue_key(queue_key)\n d = connection.keys('%s*' % prefix)\n d.addCallback(lambda keys: map(to_queue, keys))\n return d", "def keys(self):\n return self._keys", "def keys(self):\n return self._keys", "def keys(self):\n return self.__keys", "def keys(self):\n raise NotImplementedError", "def get_all_keys(self):\n return self.psettings.allKeys()", "def keys(self):\n return list(self.iterkeys())", "def keys(self):\n self._remove_expired()\n\n return self._d.keys()", "def get_keys(self):\n return list(self.public_keys.keys())", "def get_keys(self):\r\n\r\n #using database\r\n\r\n if self.using_database:\r\n aprint('GET KEYS')\r\n value_tuple = (notebookname,)\r\n db_cursor.execute(\"SELECT keyword\"\r\n +\" FROM keys_to_indexes\"\r\n +\" WHERE notebook=?;\",\r\n value_tuple)\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {key[0] for key in fetched}\r\n\r\n return set()\r\n\r\n #using shelf\r\n\r\n return self.key_dict.keys()", "def get_keys(self):\n with self.lock:\n return list(self.devices.keys())", "def list_keys(self, label=None):\r\n _filter = NestedDict({})\r\n if label:\r\n _filter['sshKeys']['label'] = query_filter(label)\r\n\r\n return self.client['Account'].getSshKeys(filter=_filter.to_dict())", "def namespace(self):\n return self.__key.namespace()", "def keys(self):\n # Collect all keys in each bucket\n all_keys = []\n for bucket in self.buckets:\n for key, value in bucket.items():\n all_keys.append(key)\n return all_keys", "def keys(self) -> KeysView[str]:\n return self.raw.keys()", "def return_keys(tsd):\n return list(tsd.keys())", "def list_user_keys(self):\n return AlgoliaUtils_request(self.headers, self.read_hosts, \"GET\", \"/1/keys\", self.timeout)", "def get_cache_names():\n\n return get_component(CachingPackage.COMPONENT_NAME).get_cache_names()", "def keys(self):\n return self._get_storage().keys()", "def keys(self):\n raise NotImplementedError('keys() should have been replaced by a metaclass')", "def iterkeys(self):", "def iterkeys(self):", "def keys(self):\n with self.__plock:\n return self._keys[:]", "def cli(ctx):\n return ctx.gi.cannedkeys.get_keys()", "def keysAll():", "def keys(self):\n return self._ctx.keys()", "def keys(self):\n return [key for key, value in self.items()]", "def keys(self):\n return list(self.__iter__())", "def _fetch_all_namespaces():\n response = _fetch_herd_session() \\\n .get('{}://{}/{}/{}'.format(HERD_REST_PROTOCOL, HERD_BASE_URL,\n HERD_REST_BASE_PATH, 'namespaces')) \\\n .json()\n\n namespaces = []\n for namespaceKey in response['namespaceKeys']:\n namespaces.append(namespaceKey['namespaceCode'])\n\n _print_info('Retrieved {} namespaces.'.format(len(namespaces)))\n return namespaces", "def GetSubkeys(self):", "def getnames(self) -> List[Dict[str, 
Any]]:\n # NOTE: warning this does not yet support pagination\n return self.rpc_call(\"getnames\")", "def prefix_keys(self, prefix, maxkeys=None):\n # TODO: write better documentation: describe purpose, provide example code\n if maxkeys is None:\n maxkeys = len(self)\n\n return wait(self.proto.fwmkeys(prefix, maxkeys))", "def iterate_keys(\n self,\n keys: istr = None,\n terms: istr = None,\n prefixes: istr = None,\n labels: istr = None,\n ) -> Iterable[str]:", "def keys(self):\n\n return self.keys_set", "def GetSSHKeys():\n keydict = {}\n for rec in database.db.itervalues():\n if 'keys' in rec:\n keydict[rec['name']] = rec['keys']\n return keydict", "def list_all_keys(riak_host,riak_port,bucket):\n url='http://%s:%s/buckets/%s/keys?keys=true' % (riak_host,riak_port,bucket)\n #print url\n r=requests.get(url)\n print json.dumps(r.json(), sort_keys=True, indent=4)", "def keys(self):\n if self.dtype != 'array':\n raise TypeError('Property `keys` only exists for DataSet arrays')\n return [os.path.basename(p).split('.')[0] for p in\n s3.ls(self.s3_path, suffix=self.format.lower())]", "def keys(self):\n return [ x for x in self ]", "def keys(self):\n\n return list(self.iterkeys())", "def keys(self):\r\n return [key for key, value in self.iteritems()]", "def get_key_list(self) -> list:\n return self.key_functs.keys()", "def get_keys(self) -> list:\r\n keys = []\r\n for key, value in self._items:\r\n keys.append(key)\r\n return keys", "def keys(self, *args, **kwargs):\n return self._list(*args, **kwargs)", "def keys(self) -> t.Tuple[str, ...]:\n return self._keys", "def get_keys(weat_db):\n import updater\n keys = updater.list_keys(weat_db, verbose=False)\n return keys", "def get_all_keys(self):\n r = []\n with self.lock:\n for key in self.keys():\n if self.get(key):\n r.append(key)\n\n return r", "def test_get_namespaces_names(self):\n pass", "def keys(brain):\n obj = brain.getObject()\n return obj.get_full_title()", "def keys(self):\n return self._d.keys()", "async def list_keys(request: web.Request) -> web.Response:\n keys = [\n {'uri': '/wifi/keys/{}'.format(key.directory),\n 'id': key.directory,\n 'name': os.path.basename(key.file)} for key in wifi.list_keys()\n ]\n return web.json_response({'keys': keys}, status=200)", "def account_keys(chain):\n return chain.backend.account_keys", "def keys(self):\n return self.__dict__.keys()", "def keys(self):\n return self.__dict__.keys()", "def keys(self):\n return self.__dict__.keys()", "def keys(self):\n return self.__dict__.keys()" ]
[ "0.6998844", "0.6847791", "0.6734126", "0.6710756", "0.6708418", "0.6673107", "0.6638886", "0.66124463", "0.6578463", "0.6555136", "0.64906746", "0.6471968", "0.6448839", "0.64258033", "0.63986486", "0.6351491", "0.63383114", "0.63130385", "0.631087", "0.6308451", "0.626402", "0.6247782", "0.6244564", "0.6225757", "0.6223954", "0.6206869", "0.61676097", "0.6167294", "0.6167294", "0.61585605", "0.6158543", "0.61524755", "0.61384785", "0.6117097", "0.6108782", "0.6099792", "0.6094867", "0.6078421", "0.6076691", "0.6074611", "0.6071257", "0.60568565", "0.60495687", "0.60361236", "0.60347474", "0.60347474", "0.6017116", "0.6007129", "0.6007129", "0.5997334", "0.5994467", "0.59868306", "0.59837425", "0.5978861", "0.5959127", "0.5951449", "0.59421194", "0.5931339", "0.5929063", "0.591938", "0.59128314", "0.5910697", "0.59016883", "0.5894129", "0.5891819", "0.58904886", "0.5888559", "0.5888559", "0.5881335", "0.5872791", "0.5867072", "0.5866554", "0.58646995", "0.5859145", "0.5853442", "0.5852535", "0.58506227", "0.5847616", "0.58469015", "0.5842184", "0.5836695", "0.5827916", "0.5827858", "0.5827043", "0.58189696", "0.58170295", "0.5814986", "0.5814385", "0.58138835", "0.58123463", "0.5812239", "0.5805002", "0.5802026", "0.57992584", "0.57957006", "0.5786049", "0.57853913", "0.5783119", "0.5783119", "0.5783119", "0.5783119" ]
0.0
-1
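The one-line document above is the key-naming scheme the surrounding records build on: every Redis key is the configured namespace joined with dot-separated parts. A standalone version with a few example outputs is shown below (the "mq" namespace is an assumed placeholder).

```python
def ns(namespace, *args):
    """Build a dot-separated Redis key name under the given namespace."""
    return "%s.%s" % (namespace, ".".join(str(arg) for arg in args))


# Assuming a configured namespace of "mq":
print(ns("mq", "jobs", "consumers"))  # -> mq.jobs.consumers
print(ns("mq", "jobs", 42))           # -> mq.jobs.42
print(ns("mq", "nextid"))             # -> mq.nextid
```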
Return key for subscribers list for given queue.
def _ns_subscriptions(self, queue): return self._ns(queue, "consumers")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def key_for_name(name):\n return 'hotqueue:%s' % name", "def QueueId(self):\n\t\treturn self._get_attribute('queueId')", "def get_queue_items(self, queue_name):\n proc = start_proc([\"/usr/bin/sudo\", \"rabbitmqctl\", \"list_queues\"],\n shell=False)\n for line in iter(proc.stdout.readline, \"\"):\n print(\"LIST QUEUES:\" + line)\n m = re.search(r\"%s\\s+([0-9]+)\" % queue_name, line)\n if m:\n return int(m.group(1))\n return None", "def get_queue(self, task_name):\n for name, queue in self.queues.items():\n if task_name in queue:\n return name\n return self.default_queue", "def _ns_queue(self, queue, consumer_id):\n return self._ns(queue, consumer_id, \"messages\")", "def service_bus_queue_endpoint_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"service_bus_queue_endpoint_id\")", "def get_rabbit_queue():\n\n return \"metrics_queue\"", "def service_bus_queue_endpoint_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_bus_queue_endpoint_id\")", "def service_bus_queue_endpoint_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_bus_queue_endpoint_id\")", "def list_queues():\n result = set()\n for s in list(systems.values()):\n for q in list(s[\"queue\"].keys()):\n result.add(q)\n\n return result", "def queue_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"queue_name\")", "def get_queue_num(self, qos_id, queue_id):\n\n q_num = None\n queues = self.qos_dict[qos_id][\"ovsdb:qos-entries\"][0][\"queue-list\"]\n\n # Go through all queues\n for queue in queues:\n cur_queue_id = queue[\"queue-ref\"].split(\"'\")[-2]\n # If we have a match, get the q_num and break\n if cur_queue_id == queue_id:\n q_num = queue[\"queue-number\"]\n break\n\n # queue_id is not found in the qos\n if q_num is None:\n #print(json.dumps(self.qos_dict[qos_id], indent=3))\n raise KeyError\n\n return q_num", "def queue_name(is_parallel):\n return QUEUE_NAMES[int(bool(is_parallel))]", "def queue_job_ids(self):\n return list(self.queue.keys())", "def get_subscription_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.subscription_controller", "def queue_path(self, project, location, queue):\n # This is value is not actually used, but it might be good for debugging.\n return \"projects/{project}/locations/{location}/queues/{queue}\".format(\n project=project, location=location, queue=queue)", "def key( self, mess, args):\n user = mess.getFrom()\n if user in self.users:\n return 'You are already subscribed.'\n else:\n self.users[user] = args\n self.log( '%s subscribed to the broadcast.' 
% user)\n return 'You are now subscribed.'", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def queue_maker(queue, bucket_name):\n scraper = key_scraper.KaleidoscopeKeyScraper(\n bucket_name=bucket_name,\n queue=queue,\n )\n scraper.add_keys_to_queue()\n\n return None", "def sqs_lookup_url(session, queue_name):\n client = session.client('sqs')\n resp = client.get_queue_url(QueueName=queue_name)\n return resp['QueueUrl']", "def _GetParentKeyFromTag(cls, tag):\n return ndb.Key('FrontendJobList', tag)", "def key(self):\n\n for member in self.members:\n if member.key:\n return member.name", "def _pool_id(self, queue, project=None):\n return self._catalogue_ctrl.get(project, queue)['pool']", "def encode_queue(self, queue):\n return \"[\" + \",\".join(queue) + \"]\"", "def encode_queue(self, queue):\n raise NotImplementedError()", "def get_key(self, item):\r\n return item[0]", "def natural_key(self):\n return (self.email_subscription_name)", "def add_queue(self, queue):\n\n queue_id = queue[\"ovsdb:queues\"][0][\"queue-id\"]\n self.queue_dict[queue_id] = queue", "def get_queue_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.queue_controller", "def subscribe_sqs_queue(self, topic, queue):\r\n t = queue.id.split('/')\r\n q_arn = 'arn:aws:sqs:%s:%s:%s' % (queue.connection.region.name,\r\n t[1], t[2])\r\n resp = self.subscribe(topic, 'sqs', q_arn)\r\n policy = queue.get_attributes('Policy')\r\n if 'Version' not in policy:\r\n policy['Version'] = '2008-10-17'\r\n if 'Statement' not in policy:\r\n policy['Statement'] = []\r\n statement = {'Action' : 'SQS:SendMessage',\r\n 'Effect' : 'Allow',\r\n 'Principal' : {'AWS' : '*'},\r\n 'Resource' : q_arn,\r\n 'Sid' : str(uuid.uuid4()),\r\n 'Condition' : {'StringLike' : {'aws:SourceArn' : topic}}}\r\n policy['Statement'].append(statement)\r\n queue.set_attribute('Policy', json.dumps(policy))\r\n return resp", "def get_key(self, value):\n return [item[0] for item in self.items() if item[1] == value]", "def _getqueue(self):\n go = self.tickqueue.get()\n for index in range(len(self.outqueues)):\n if not self.outqueues[index].empty(): return self.outqueues[index]", "def _getqueue(self):\n\n go = self.tickqueue.get()\n for index in range(len(self.outqueues)):\n if not self.outqueues[index].empty():\n return self.outqueues[index]", "def current_queues(petrol_stations):\n current_queues = {}\n for number_of_station in petrol_stations:\n info = {}\n info['cars in the queue'] = 0\n info['max of queue'] = petrol_stations[number_of_station]['queue']\n current_queues[number_of_station] = info\n return current_queues", "def get_id(self, name, tenant=None):\n queue = self._get(name, tenant, fields=[\"_id\"])\n return queue.get(\"_id\")", "def get_queue_name(namelength = 513):\n\n appender = \"/queues/\" + binascii.b2a_hex(os.urandom(namelength))\n url = common.functionlib.create_url_from_appender(appender)\n return url", "def get_key(self):\n return self.key", "def get_key(self):\n return self.key", "def get_queue(self):\r\n return _channeldata[self.chan].queue", "def encode_queue(self, queue):\n return 
thrift.encode_bytes_list(queue)", "def key(self):\n return key_for_name(self.name)", "def queue_job_names(self):\n return [attrs[self.QCOL_NAME] for attrs in self.queue.values()]", "def getQueueDetails(self, queue_name, project_id=\"\"):\n if project_id == \"\":\n project_id = self.project_id\n url = \"%sprojects/%s/queues/%s?oauth=%s\" % (self.url, project_id,\n queue_name, self.token)\n body = self.__get(url)\n queue = json.loads(body)\n return queue", "def key(self):\n return self.key_for(self.id)", "def get_routing_key(self, msg_id):\n _msg = self.__get_msg(msg_id)\n if _msg:\n return _msg.routing_key", "def get_key_id(self):", "def get_queue_number(self):\n outstring = self.dut.send_expect(\"stop\", \"testpmd> \")\n time.sleep(2)\n result_scanner = r\"Forward Stats for RX Port= %s/Queue=\\s?([0-9]+)\" % self.dut_ports[0]\n scanner = re.compile(result_scanner, re.DOTALL)\n m = scanner.search(outstring)\n queue_id = m.group(1)\n print \"queue is %s\" % queue_id\n self.dut.send_expect(\"start\", \"testpmd> \")\n return queue_id", "def receive_key(self, key):\n self.queue.put(key)", "def consumer_details_for_queue(self):\n response_json = self.rabbit_request(\"queues/%2F/\" + self._queue)\n return response_json[\"consumer_details\"]", "def __str__(self):\r\n return f\"Queue object: {self.queue}\"", "def show_qos_queue(self, queue, **_params):\r\n return self.get(self.qos_queue_path % (queue),\r\n params=_params)", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def key_for(cls, job_id: str) -> bytes:\n return (cls.redis_job_namespace_prefix + job_id).encode('utf-8')", "def get_key_at_index(self, index):\n return self.chain_key.subkey(index)", "def get_key(uid):\n return \"transactions/%s\" % (str(uid))", "def store_queue_for_restart(queue):\n if TEST_MODE:\n return queue.__dict__\n if not queue.currentM:\n logger.error('Message was not found in queue for restart daemon.')\n return None\n return {\n 'conn_region': queue.conn.region.name,\n 'queue_name': queue.q.name,\n 'body': queue.currentM.get_body(),\n 'attributes': queue.currentM.attributes,\n 'md5_message_attributes': queue.currentM.md5_message_attributes,\n 'message_attributes': queue.currentM.message_attributes,\n 'receipt_handle': queue.currentM.receipt_handle,\n 'id': queue.currentM.id,\n 'md5': queue.currentM.md5\n }", "def get_pid(self, pid):\n for p in self._queue:\n if p.id == pid:\n return p\n else: return 0", "def get(subject_name, user_email):\n return Subscription.get_by_key_name(subject_name + ':' + user_email)", "def get_queue_name(self):\n return self._graph_executor.get_queue_name()", "def queues(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"queues\")", "def get_key(self) -> int:\n return self.__key", "def get_key(self, i) -> key_type: # pylint: disable=undefined-variable\n if i < len(self._destinations):\n return 
self._destinations[list(self._destinations.keys())[i]][0]\n return None", "def pop_queue(self, queue=None):\n if not queue:\n return False\n \n cur = self.conn.cursor()\n cur.execute(\"LOCK TABLE \" + queue + \" IN ACCESS EXCLUSIVE MODE;\")\n\n cur.execute(\"SELECT id FROM \" + queue + \" LIMIT 1;\")\n row = cur.fetchone()\n self.conn.commit()\n \n if row:\n cur.execute(\"DELETE FROM \" + queue + \" WHERE id='\"+str(row[0])+\"';\")\n return row[0]\n else:\n return False", "def get_key(self, role):\n\n for key, role_name in self.assignable_roles[0].items():\n if role_name == role.name:\n return key", "def get_queue_services_details(\n credentials: Credentials, subscription_id: str, queue_services: List[Dict],\n) -> Generator[Any, Any, Any]:\n for queue_service in queue_services:\n queues = get_queues(credentials, subscription_id, queue_service)\n yield queue_service['id'], queues", "def ztest_get_from_uuid(self):\n \n queue = NMSQueue()\n \n item = NMSQueueItem(5,\"data %s\" % (1))\n \n item.set_uuid()\n \n print(\"item = %s\\n\" %(item))\n \n queue.put(item)\n \n newitem = queue.get_item(item.uuid)\n \n print(\"new item = %s\\n\" % (newitem) )", "def key(self) -> str:\n return self.__key", "def key(self, name):\n return name", "def get_queue(queue_name=\"\"):\n print(get_qstat_arg(queue_name))\n q = subprocess.Popen(\n _get_qstat_arg(queue_name), stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, stdin=subprocess.PIPE\n )\n o, e = q.communicate()\n\n return o", "def _find_in_queue(self, nick):\n i = 0\n for user, msg in self._queue:\n if user == nick:\n return i\n i += 1\n return -1", "def get_worker_id_queue():\n global _WORKER_ID_QUEUE\n if _WORKER_ID_QUEUE is None:\n _WORKER_ID_QUEUE = multiprocessing.Queue()\n return _WORKER_ID_QUEUE", "def _shard_id(self, queue, project=None):\n cache_key = _shard_cache_key(queue, project)\n shard_id = self._cache.get(cache_key)\n\n if shard_id is None:\n shard_id = self._catalogue_ctrl.get(project, queue)['shard']\n\n if not self._cache.set(cache_key, shard_id, _SHARD_CACHE_TTL):\n LOG.warn('Failed to cache shard ID')\n\n return shard_id", "def _put(self, item, queue):", "def queueToOrderedDict(queue):\n d = dict()\n while not queue.empty():\n d.update(queue.get())\n od = collections.OrderedDict(sorted(d.items()))\n return od", "def get_xqueue_callback_url_prefix(request):\r\n prefix = '{proto}://{host}'.format(\r\n proto=request.META.get('HTTP_X_FORWARDED_PROTO', 'https' if request.is_secure() else 'http'),\r\n host=request.get_host()\r\n )\r\n return settings.XQUEUE_INTERFACE.get('callback_url', prefix)", "def get_cache_key(prefix):\n return '%s' % (prefix)", "async def get_subscription_id(user: discord.User, redis: RedisDB):\n return await redis.get(user.id)", "def get_routing_key_from_args(args):\n\n name = args[PUBLISH_ROUTING_KEY]\n return name" ]
[ "0.6073377", "0.5581305", "0.55699074", "0.5561635", "0.55190897", "0.5469011", "0.540408", "0.5387118", "0.5387118", "0.5283115", "0.5264972", "0.5261164", "0.52112347", "0.51992536", "0.5160349", "0.5156108", "0.51445335", "0.5114686", "0.5114686", "0.5114686", "0.5114686", "0.5114686", "0.5114686", "0.5114686", "0.5114686", "0.5114686", "0.5094159", "0.5080408", "0.5075173", "0.50626355", "0.50524944", "0.50311947", "0.5027298", "0.5005734", "0.50039434", "0.50038004", "0.5001514", "0.49772528", "0.4976453", "0.49506167", "0.49470586", "0.4940673", "0.49274245", "0.49202433", "0.49171224", "0.49171224", "0.49135748", "0.49026448", "0.49019662", "0.49006152", "0.48942435", "0.48932427", "0.48928785", "0.4890099", "0.4888103", "0.48871988", "0.48815784", "0.4871936", "0.48482856", "0.48471776", "0.48471776", "0.48471776", "0.48471776", "0.48471776", "0.48471776", "0.48471776", "0.48471776", "0.48471776", "0.48471776", "0.48471776", "0.48471776", "0.48471776", "0.48471776", "0.48471776", "0.48451295", "0.48436272", "0.48394603", "0.4836741", "0.48352996", "0.48152047", "0.47979283", "0.47976854", "0.47965524", "0.47906142", "0.47905585", "0.4789986", "0.478482", "0.47805208", "0.47705534", "0.47702459", "0.47701636", "0.47681952", "0.47677693", "0.4756752", "0.47563994", "0.4752112", "0.47520086", "0.4747301", "0.4746511", "0.47424632" ]
0.5925365
1
Return key for nextid counter.
def _ns_nextid(self): return self._ns("nextid")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_id(self):\n self.id_counter += 1\n return self.id_counter - 1", "def _GetNextId(self):\r\n ret = self.next_id\r\n self.next_id += 1\r\n return str(self.next_id)", "def next_id(self):\n next_id = self._nextid\n self._nextid += 1\n return next_id", "def getNextID(self, d):\n try:\n listOrdered = d.keys()\n listOrdered = funcs.sortStringList(listOrdered)\n lastID = int(listOrdered[-1])\n nextID = str(lastID + 1)\n for i in range(1,int(nextID)):\n if str(i) not in listOrdered:\n return str(i)\n return nextID\n except:\n return '1'", "def get_key(self) -> int:\n return self.key", "def get_key(self) -> int:\n return self.__key", "def get_next_id():\n global _lock, _counter\n with _lock:\n if _counter == 65535:\n _counter = 1\n else:\n _counter += 1\n\n return str(_counter)", "def _next_id(self):\n self._uniq_id += 1\n return str(self._uniq_id)", "def _next_id(self):\n self._uniq_id += 1\n return str(self._uniq_id)", "def get_key_id(self):", "def get_next_identifier(self) -> int:\n if self.items:\n return self.items[-1].identifier + 1\n else:\n return 1", "def key_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"key_id\")", "def _unique_key(self):\n key = f'factor_{self.counter}'\n self.counter += 1\n return key", "def next_id(self):\n return self.max_id + 1", "def next_akt_key(self):\r\n return self._tokens[4]", "def nextPrimaryKey( self, table_key=None ):\n\n # Make sure the dictionary key exists and, if not, create with zero as starting value.\n if not table_key in self.primaryKeys:\n self.primaryKeys[ table_key ] = 0\n\n\n # Increment the id.\n self.primaryKeys[ table_key ] += 1\n\n return self.primaryKeys[ table_key ]", "def key(self):\n return self.sentence_idx * (10 ** 6) + self.get_id()", "def _next_id(self):\n # modulo to keep within int32 (signed)\n self.correlation_id = (self.correlation_id + 1) % 2**31\n return self.correlation_id", "def _unique_key(self):\n key = f'param_{self.counter}'\n self.counter += 1\n return key", "def key_id(self):\n return self._key_id", "def key():", "def key(self):\n return self.key_for(self.id)", "def _get_next_cust_id():\n # print('Customer roster: ' + str(customers))\n key_list = []\n for customer_key in customers:\n stripped_prefix = customer_key[1:]\n # print('Adding key: ' + str(stripped_prefix))\n key_list.append(stripped_prefix)\n key_list.sort()\n last_id = int(key_list[-1])\n return 'C' + str(last_id + 1)", "def key(self):\n return str(self._id)", "def new_key(self):\n return max(self.code_table.keys()) + 1", "def _next_id(self, prefix):\n return f\"{prefix}_{next(self._ids)}\"", "def new_id(self):\n self.next += 1\n return self.next", "def _next_request_id(self):\n self._request_id += 1\n return str(self._request_id)", "def get_next_id(self):\n con = self.c._connect()\n last_id = self.c.get_last_id(con.cursor())\n con.close()\n return last_id + 1", "def _nextId(cls, id=None):\n if (not hasattr(DAG, \"_lastID\")):\n DAG._lastID = 0\n if (id):\n DAG._lastId = id\n DAG._lastID = DAG._lastID + 1\n return DAG._lastID", "def key(self):\n return self.__key", "def getMaxKey(self):\n if self.head.next_cl and self.head.next_cl != self.tail:\n return self.head.next_cl._next.key\n else:\n return \"\"", "def getClusterVmNextId(self):\n data = self.connect('get','cluster/nextid',None)\n return data", "def key(self):\n return self._key if self._key else self.factory().key", "def next_value(self):\n self._lock.acquire()\n try:\n id = self._next_id\n self._next_id += 1\n finally:\n self._lock.release()\n return id", "def getMinKey(self):\n 
if self.tail.prev_cl and self.tail.prev_cl != self.head:\n return self.tail.prev_cl._next.key\n else:\n return \"\"", "def _getNextKey(self, item):\n return (2, item)", "def _spinner_key():\n with _spinner_key.lock:\n _spinner_key.counter += 1\n return \"_spinner_%d\" % _spinner_key.counter", "def _get_next_event_id():\n VenueCrawler._event_id += 1\n return VenueCrawler._event_id", "def _next_rId(self):\n tmpl = 'rId%d'\n next_rId_num = 1\n for relationship in self._values:\n if relationship._num > next_rId_num:\n return tmpl % next_rId_num\n next_rId_num += 1\n return tmpl % next_rId_num", "def get_id(self, index):\n return self.__keys[index]", "def getMinKey(self):\n return self.head.next.key if self.head.next != self.tail else \"\"", "def get(self, key):\n if self.unused[key] is None:\n self._update_unused(key)\n new_id = self.unused[key]\n self.unused[key] += 1\n return new_id", "def __next_index():\n return redis_store.incr(String.__name__.lower() + '-index')", "def get_numkey(self):\n return self._numkey", "def next_identity(self) -> PublicationId:\n ...", "def key_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"key_id\")", "def get_key(self):\n return self._determine_key()", "def next_node_id(self) -> int:\n i = 1\n while True:\n if i not in self.session.nodes:\n break\n i += 1\n return i", "def key(key):\n return key", "def key(self):\n return self._key", "def key(self):\n return self._key", "def key(self):\n return self._key", "def key(self):\n return self._key", "def alloc_docid(self):\n self.docid = hex(self.client.incr(self.dbprefix + 'nextid'))[2:]\n return self.docid", "def __next__(self):\n\n nxt = next(self.tree)\n if nxt is not None:\n return nxt.key", "def getMinKey(self):\n if self.head is None:\n return \"\"\n return self.head.first.key", "def key(self) -> str:\n return self.__key", "def get_key(self):\n return self.key", "def get_key(self):\n return self.key", "def _key(self):\n return None", "def _getNextGroupId(self):\n groupId = self._nextGroupId\n self._nextGroupId += 1\n return str(groupId)", "def gen_id(self) -> str:\n self._id += 1\n return str(self._id)", "def getCurrentKeyID(self):\n\n return self._keyID", "def _redis_record_id_key(self):\n return 'tesseract:table:%s:rowid' % self.table_name", "def next(self):\n self.iterator.next()\n return self.iterator.prevKey", "def get_key(self):\n\n return self._key", "def key(self):\n\n return self._key", "def key(self) -> str:\n return self._key", "def key(self):\n return None", "def _get_next_pk(self, last_pk):\n ans = self.execute(self.commands.next_pk(\n self.name,\n self.primary_key_column,\n last_pk,\n self.chunk_size\n ))[0][0]\n return ans", "def update_next_id(cls):\n cls.next_id += 1", "def generate_key():\n return get_token_generator().generate_token()", "def next_session_key(self, session_key):\r\n\t\t## verify hashcode\r\n\t\tif self.__hash == \"\":\r\n\t\t\traise VDOM_exception_sec(\"Hash code is empty\")\r\n\r\n\t\tfor idx in xrange(len(self.__hash)):\r\n\t\t\ti = self.__hash[idx]\r\n\t\t\tif not str(i).isdigit():\r\n\t\t\t\traise VDOM_exception_sec(\"Hash code contains non-digit letter \\\"%c\\\"\" % str(i))\r\n\t\tresult = 0\r\n\t\tfor idx in xrange(len(self.__hash)):\r\n\t\t\ti = self.__hash[idx]\r\n\t\t\tresult += int(self.__calc_hash(session_key, int(i)))\r\n\t\treturn (\"0\"*10 + str(result)[0:10])[-10:]", "def last_key(self):\n return self._last_key", "def get_current_id(self):\n\n id = self.ids[-1]\n\n if id is None:\n raise KeyError()\n\n return id", "def get_index(self, 
key):\r\n\t\tindex = self._hash_function(key) % self.capacity\r\n\t\treturn index", "def key(self):\n return key_for_name(self.name)", "def getMinKey(self) -> str:\n if self.head.next.val == 0:\n return \"\"\n return next(iter(self.head.next.keys))", "def _make_key(self, record_dict: Dict[str, Any]) -> int:\n return self._keys.setdefault(frozenset(record_dict.keys()), len(self._keys))", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")" ]
[ "0.75374943", "0.7502265", "0.75013953", "0.7485256", "0.7304283", "0.72980666", "0.7239975", "0.7237588", "0.7237588", "0.7187678", "0.7133712", "0.71068776", "0.70844835", "0.7051308", "0.6975568", "0.69365454", "0.6920834", "0.6917348", "0.6893729", "0.68801874", "0.6868609", "0.68582666", "0.6842469", "0.68230635", "0.6807763", "0.67988706", "0.6792581", "0.6748138", "0.67114395", "0.6637965", "0.66141003", "0.65777206", "0.65723455", "0.6571333", "0.6569332", "0.65551996", "0.6528916", "0.651885", "0.65143836", "0.6507089", "0.6505945", "0.6504711", "0.6465674", "0.64559567", "0.64552397", "0.64496785", "0.6447421", "0.6444894", "0.6443525", "0.6436307", "0.64177144", "0.64177144", "0.64177144", "0.64177144", "0.64074785", "0.6392475", "0.6373698", "0.6358763", "0.6347463", "0.6347463", "0.6327254", "0.6315537", "0.63063514", "0.628106", "0.62767416", "0.6253136", "0.6252718", "0.62499285", "0.62440497", "0.62410784", "0.62180597", "0.62149376", "0.62094617", "0.6208657", "0.62009865", "0.6200908", "0.61965245", "0.61964536", "0.6184858", "0.61842054", "0.61645234", "0.61645234", "0.61645234", "0.61645234", "0.61645234", "0.61645234", "0.61645234", "0.61645234", "0.61645234", "0.61645234", "0.61645234", "0.61645234", "0.61645234", "0.61645234", "0.61645234", "0.61645234", "0.61645234", "0.61645234", "0.61645234", "0.61645234" ]
0.7051454
13
Return key for retrieving message.
def _ns_message(self, queue, message_id): return self._ns(queue, "messages", message_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_key(self):\n return self.key", "def get_key(self):\n return self.key", "def getKey(self):\n\t\treturn self.key", "def get_key(self):\n return self._determine_key()", "def getKey(self):\n return self.key", "def _GetKeyString(self):\n return self.__key_string", "def getKey(self):\n return self.key", "def _get_recipient_key(self, protected_message):\n return self.recipient_key", "def _GetKeyString(self):\n return self.__key_string", "def getKey(self):\n return self.__key", "def __GetKeyString(self):\n return self._GetKeyString()", "def __GetKeyString(self):\n return self._GetKeyString()", "def key(key):\n return key", "def get_routing_key(self, msg_id):\n _msg = self.__get_msg(msg_id)\n if _msg:\n return _msg.routing_key", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, 
\"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def get_key_id(self):", "def get_key(self) -> int:\n return self.key", "def _GetKeyString(self):", "def _GetKeyString(self):", "def get_key(self):\n\n return self._key", "def key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return self.__key", "def key(self) -> str:\n return self._key", "def getkey(self) -> str:\n return self.screen.getkey()", "def key(self):\n return self._key", "def key(self):\n return self._key", "def key(self):\n return self._key", "def key(self):\n return self._key", "def key(self):\n return self._key if self._key else self.factory().key", "def key(self):\n return self.__key", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")" ]
[ "0.7373115", "0.7373115", "0.7311321", "0.730466", "0.7236075", "0.7196054", "0.7191205", "0.717894", "0.7138465", "0.7134151", "0.71263784", "0.7073601", "0.7052514", "0.70436716", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.6997047", "0.69856197", "0.69585425", "0.6954512", "0.6954512", "0.69495916", "0.69430506", "0.69430506", "0.6939496", "0.6922798", "0.689647", "0.68789583", "0.68789583", "0.68789583", "0.68789583", "0.68480664", "0.6844947", "0.68250924", "0.68250924", "0.68250924", "0.68250924", "0.68250924", "0.68250924", "0.68250924", "0.68250924", "0.68250924", "0.68250924", "0.68250924", "0.68250924" ]
0.0
-1
Return key for queue for one consumer.
def _ns_queue(self, queue, consumer_id): return self._ns(queue, consumer_id, "messages")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def key_for_name(name):\n return 'hotqueue:%s' % name", "def QueueId(self):\n\t\treturn self._get_attribute('queueId')", "def get_queue(self, task_name):\n for name, queue in self.queues.items():\n if task_name in queue:\n return name\n return self.default_queue", "def consumer_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"consumer_id\")", "def consumer_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"consumer_id\")", "def get_key(self) -> int:\n return self.key", "def get_key(self) -> int:\n return self.__key", "def key(self):\n return self.key_for(self.id)", "def get_key(self):\n return self._determine_key()", "def consumer_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"consumer_id\")", "def get_key(self):\n return self.key", "def get_key(self):\n return self.key", "def get_rabbit_queue():\n\n return \"metrics_queue\"", "def get_key_id(self):", "def key(self):\n return None", "def _key(self):\n return None", "def allocation(self):\n return self.mapping[self.consumer_name]", "def key(self):\n return self.__key", "def key(self):\n return self._key if self._key else self.factory().key", "def receive_key(self, key):\n self.queue.put(key)", "def get_key(self):\n\n return self._key", "def getProducer():\r\n\r\n # get the config and a producer\r\n config = ecommerce.config.getConfig()\r\n return ecommerce.queue.queue(config, queuePrefix)", "def get_key(self, i) -> key_type: # pylint: disable=undefined-variable\n if i < len(self._destinations):\n return self._destinations[list(self._destinations.keys())[i]][0]\n return None", "def key(self):\n return key_for_name(self.name)", "def consumer_details_for_queue(self):\n response_json = self.rabbit_request(\"queues/%2F/\" + self._queue)\n return response_json[\"consumer_details\"]", "def queue_maker(queue, bucket_name):\n scraper = key_scraper.KaleidoscopeKeyScraper(\n bucket_name=bucket_name,\n queue=queue,\n )\n scraper.add_keys_to_queue()\n\n return None", "def get_id(self, name, tenant=None):\n queue = self._get(name, tenant, fields=[\"_id\"])\n return queue.get(\"_id\")", "def getKey(self):\n\t\treturn self.key", "def getMaxKey(self) -> str:\n if self.buckets.empty():\n return \"\"\n return iter(self.buckets.back().keys).__next__()", "def key(self):\n return self._key", "def key(self):\n return self._key", "def key(self):\n return self._key", "def key(self):\n return self._key", "def getKey(self):\n return self.__key", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def get_pid(self, pid):\n for p in self._queue:\n if p.id == pid:\n return p\n else: return 0", "def get_semaphore_key(name, pid):\n\n key = pid\n for i in range(0, len(name)):\n key += i * 100 + ord(name[i])\n return key", "def min(self):\r\n if self.is_empty():\r\n raise Exception('Priority queue is empty.')\r\n item = self._data[0]\r\n return item._key", "def getMinKey(self) -> str:\n if 
self.buckets.empty():\n return \"\"\n return iter(self.buckets.front().keys).__next__()", "def key(self):\n return str(self._id)", "def getKey(self, key):\n return self.BUCKET.get_key(key)", "def consumer_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"consumer_group_id\")", "def queue_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"queue_name\")", "def service_bus_queue_endpoint_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"service_bus_queue_endpoint_id\")", "def key_id(self):\n return self._key_id", "def getKey(self):\n return self.key", "def getKey(self):\n return self.key", "def key():", "def get_queue(self):\r\n return _channeldata[self.chan].queue", "def min_key(self):\n return self.__keys[self.__pq[1]]", "def get_worker_id_queue():\n global _WORKER_ID_QUEUE\n if _WORKER_ID_QUEUE is None:\n _WORKER_ID_QUEUE = multiprocessing.Queue()\n return _WORKER_ID_QUEUE", "def key(self):\n return self.name", "def key(self) -> str:\n return self.__key", "def queue_name(is_parallel):\n return QUEUE_NAMES[int(bool(is_parallel))]", "def receive_key(self, key):\n try:\n self.queue.put(key)\n except:\n raise #Just collecting possible exceptions for now", "def _get_queue_type(self):\n return self.__queue_type", "def _get_queue_type(self):\n return self.__queue_type", "def _get_queue_type(self):\n return self.__queue_type", "def _get_queue_type(self):\n return self.__queue_type", "def _get_queue_type(self):\n return self.__queue_type", "def _get_queue_type(self):\n return self.__queue_type", "def consumer_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"consumer_group_id\")", "def peek(self):\n if len(self.priority_queue.values()):\n nextkey = 0\n while nextkey not in self.priority_queue:\n nextkey += 1\n return self.priority_queue[nextkey][0]\n else:\n raise IndexError(\"There's nothing in your queue\")", "def get_queue(queue_name=\"\"):\n print(get_qstat_arg(queue_name))\n q = subprocess.Popen(\n _get_qstat_arg(queue_name), stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, stdin=subprocess.PIPE\n )\n o, e = q.communicate()\n\n return o", "def getMaxKey(self):\n if self.head.next_cl and self.head.next_cl != self.tail:\n return self.head.next_cl._next.key\n else:\n return \"\"", "def key(self):\n\n return self._key", "def service_bus_queue_endpoint_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_bus_queue_endpoint_id\")", "def service_bus_queue_endpoint_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_bus_queue_endpoint_id\")", "def peek(self) -> int: \n if not self.empty(): \n return self.queue[0] \n return None", "def get_queue_name(self):\n return self._graph_executor.get_queue_name()", "def get_routing_key(self, msg_id):\n _msg = self.__get_msg(msg_id)\n if _msg:\n return _msg.routing_key", "def get_queue(self):\n return self.queue", "def get_queue(self):\n return self.queue", "def peek(self):\r\n return self.queue[0]", "def peek(self):\r\n return self.queue[0]", "def key(self) -> str:\n return self._key", "def key(self) -> Key:\n return self._key", "def getMaxKey(self):\n return self.tail.prev.key if self.tail.prev != self.head else \"\"", "def get_key(self, item):\r\n return item[0]", "def peek(self):\n return self.queue[0]", "def key(key):\n return key", "def getMaxKey(self):\n if self.last is None:\n return \"\"\n return self.last.first.key", "def name(self):\n return self.key", "def curr_queue(self):\n pass", "def key(self) -> Optional[str]:\n 
return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")" ]
[ "0.6709878", "0.6203875", "0.61977", "0.6167725", "0.61443627", "0.6122787", "0.61105525", "0.60962135", "0.60959727", "0.6090235", "0.6011632", "0.6011632", "0.59730965", "0.59651434", "0.5943122", "0.59121245", "0.59014606", "0.588179", "0.5877485", "0.5872953", "0.5849946", "0.5808678", "0.5786082", "0.57844836", "0.5779724", "0.5775352", "0.5771775", "0.57692856", "0.5765765", "0.5743079", "0.5743079", "0.5743079", "0.5743079", "0.5741627", "0.57321286", "0.57321286", "0.57321286", "0.57321286", "0.57321286", "0.57321286", "0.57321286", "0.57321286", "0.57321286", "0.57321286", "0.57321286", "0.57321286", "0.57321286", "0.57321286", "0.57321286", "0.5714071", "0.5711283", "0.57032996", "0.57008284", "0.5691914", "0.5679366", "0.5673361", "0.5669566", "0.5666092", "0.56513447", "0.56490624", "0.5645551", "0.5640159", "0.56331605", "0.56309885", "0.5622441", "0.5620415", "0.5619749", "0.56159484", "0.56053954", "0.56047004", "0.56047004", "0.56047004", "0.56047004", "0.56047004", "0.56047004", "0.5604549", "0.56040835", "0.5603587", "0.5593051", "0.559175", "0.5591313", "0.5591313", "0.5587482", "0.5584133", "0.557139", "0.55664974", "0.55664974", "0.5564957", "0.5564957", "0.5560937", "0.55563253", "0.55509174", "0.5546865", "0.5526614", "0.5520468", "0.55155593", "0.5507412", "0.5503924", "0.54822", "0.54822" ]
0.5564891
89
Create instance of Subscription. Do not call directly, use pyrps.subscribe().
def __init__(self, pyrps, queue, consumer_id): self.pyrps = pyrps self.queue = queue self.consumer_id = consumer_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(self, subject):\n pass", "def subscribe(self, **subscription_request):\n return self.subscribe_impl(mode='subscribe', **subscription_request)", "def subscription(self, uuid):\r\n return subs.Subscription(self, uuid)", "def subscribe(self, subscription):\n try:\n if isinstance(subscription, Subscription):\n sub = Subscribe(subscription, self.__pool, self.myAddress)\n self.send(self.__pool, sub)\n except Exception:\n handle_actor_system_fail()", "def subscription(self):\r\n return SubscriptionResource(self)", "def test_create_subscription(self):\n pass", "def _subscribe(self):\n self.subscribed = True\n self.subscribe_date = now()\n self.unsubscribed = False", "def subscribe(self):\n if not self._subscribed and self._connected:\n if ATTR_STREAM_ID not in self.data:\n msg = self._create_message(strings.SUB_MSG)\n self.write(msg)\n else:\n msg = self._create_message(strings.RESUB_MSG)\n self.write(msg)\n self._subscribed = True", "def subscribe(self):\n res = self._subscribe()\n if res is not None:\n self._subscribed = True\n return res", "def subscribe(receiver, updateInterval=None):", "def StartSubscriptions(self):\n rospy.Subscriber('/drivers/dvl', Dvl, self.dvl_callback)\n rospy.Subscriber('/drivers/imu', Imu, self.imu_callback)\n rospy.Subscriber('/reference/depth', Position, self.refDepth_callback)\n rospy.Subscriber('/reference/speed', Speed, self.refSpeed_callback)\n rospy.Subscriber('/reference/rpy', Euler, self.refRpy_callback)\n rospy.Subscriber('/reference/ll', Position, self.refLL_callback)\n rospy.Subscriber('/control/trackers_enabled', Trackers, self.trackersControl_callback)", "def subscribe(receiver, updateInterval=10):", "def __init__(self,sub_topic=\"\",pub_topic=\"\",data_type=None,tag=\"\",alt_type=None):\n self.sub_topic=sub_topic;\n self.pub_topic=pub_topic;\n self.data_type=data_type;\n self.alt_type=alt_type;\n self.tag=tag;\n self.subscriber=rospy.Subscriber(self.sub_topic+self.tag,self.data_type, self.callback_function,queue_size=20);\n self.message_publisher=None;", "def _create_subscription(self):\n try:\n self.client.create_subscription(\n name=self.subscription_path, topic=self.topic_path\n )\n except NotFound:\n # suitable topic does not exist in the Pitt-Google project\n raise ValueError(\n (\n f\"A subscription named {self.subscription_name} does not exist\"\n \"in the Google Cloud Platform project \"\n f\"{settings.GOOGLE_CLOUD_PROJECT}, \"\n \"and one cannot be automatically create because Pitt-Google \"\n \"does not publish a public topic with the same name.\"\n )\n )\n else:\n self._log_and_print(f\"Created subscription: {self.subscription_path}\")", "def subscribe(receiver, catchup):", "async def subscribe(self, subscription: Subscription, reqid: int) -> SubStreamPrivate:\n # a simple request response API, unblocking.\n\n # Because subscribe is callable multiple times with the same subdata,\n # but this would trigger \"already subscribed\" error on kraken side\n\n chanpriv = private_subscribe(channel_name=subscription.name,\n loop=asyncio.get_running_loop())\n\n subdata = Subscribe(subscription=subscription, reqid=reqid)\n\n strdata = self.subscribe_schema.dumps(subdata)\n await self.connect(strdata)\n\n # retrieving all channel_ids for this subscription:\n\n self._streams[subdata] = SubStreamPrivate(channelprivate=chanpriv)\n\n # await subscription to be set before returning\n return await self._streams[subdata]\n # TODO : maybe context manager to cleanup 
the queue when we dont use it or unsubscribe ?", "def handle_create(self):\n subscription = self.client().subscription(\n self.properties[self.QUEUE_NAME],\n subscriber=self.properties[self.SUBSCRIBER],\n ttl=self.properties[self.TTL],\n options=self.properties[self.OPTIONS]\n )\n self.resource_id_set(subscription.id)", "def _create_sub(name, rostype, topic_callback, *args, **kwargs):\n # counting subscriber instance per topic name\n if name in TopicBack.sub_instance_count.keys():\n TopicBack.sub_instance_count[name] += 1\n else:\n TopicBack.sub_instance_count[name] = 1\n\n return rospy.Subscriber(name, rostype, topic_callback, *args, **kwargs)", "def subscriber(self, iTag, msgType, addr):\r\n return ROSSubscriber(self, iTag, msgType, addr)", "def subscribe(observer):", "def subscribe(observer):", "def subscribe(self, inst):\r\n if inst not in self._subscribers:\r\n self._subscribers.append(inst)\r\n vprint(\"{} is subscribed to {}\".format(inst.name, self.name))", "def subscribe(self):\n with self._rabbit_connection.connection.channel() as channel:\n self._queue = rabbitpy.Queue(\n channel=channel,\n name=self._subscriber_name + \"_queue\",\n durable=True,\n message_ttl=5 * 24 * 60 * 60 * 1000 # 5 days\n )\n self._queue.declare()\n self._queue.bind(self._exchange, self._routing_key)\n\n self._consume()", "def CreateSubscribeTransaction(self, dest, once=False):\n c = Subscribe(dest, self.node_id, once)\n self.connections.append((\"REACTIVE\", c))\n return c", "def subscription(self, subscription):\n\n self._subscription = subscription", "def to_subscription_instance_10(self):\n delivery_params = None # TODO: Implement this\n poll_instances = None # TODO: Implement this\n\n si = tm10.SubscriptionInstance(subscription_id=str(self.subscription_id),\n delivery_parameters=delivery_params,\n poll_instances=poll_instances)\n return si", "def test_get_subscription(self):\n pass", "def _create_subscriber(self, topic_name):\n if self._sub:\n self._sub.unregister()\n self._sub = rospy.Subscriber(topic_name, Image, self._image_callback)\n rospy.loginfo(\"Listening to %s -- spinning ..\" % self._sub.name)\n self._widget.setWindowTitle(\"Label plugin, listening to (%s)\" % self._sub.name)", "def test_process_subscriptions(self):\n pass", "def subscription(bot, update):\n chat_id = update.message.chat_id\n bot.sendMessage(chat_id=chat_id, text=SUBSCRIPTION_MSG, parse_mode='markdown', \n disable_web_page_preview=True)\n \n mp.track(get_user_info(chat_id)['PID'], 'Checked Subscription')", "def _subscribe(self):\n if self.subscribed:\n return False\n return {}", "def create_subscriber():\n rospy.init_node(\"hello_world_sub_node\")\n rospy.Subscriber(\"hello_world\", String, process_hello_world_msg)", "def __init__(self):\n\n self.conn = Connection().sns_connection()\n\n # Create the cloudwatch topic if not exists, and store its ARN\n self.cloudwatch_arn = self._create_topic_if_not_exists(self.CLOUDWATCH_TOPIC)\n\n # If there are no subscriptions, subscribe the default email\n if not len(self.get_cloudwatch_email_subscriptions()):\n self.subscribe_email_to_cloudwatch(Connection().DEFAULT_ALERT_EMAIL)", "def test_subscribe(self):\n self.service.clientConnected()\n self.service.subscribe(u'url', None)\n pubsubClient = self.service.pubsubClient\n self.assertIn(u'url', pubsubClient.subscriptions)", "def test_get_subscriptions(self):\n pass", "def post(self):\n data = request.json\n return new_subscription(data=data)", "def post_create_subscription(\n self, response: pubsub.Subscription\n ) -> 
pubsub.Subscription:\n return response", "def subscribe(self) -> None:\n events = [\n HathorEvents.NETWORK_NEW_TX_ACCEPTED,\n HathorEvents.NETWORK_PEER_CONNECTING,\n HathorEvents.NETWORK_PEER_READY,\n HathorEvents.NETWORK_PEER_CONNECTED,\n HathorEvents.NETWORK_PEER_DISCONNECTED,\n HathorEvents.NETWORK_PEER_CONNECTION_FAILED\n ]\n\n for event in events:\n self.pubsub.subscribe(event, self.handle_publish)", "def touch_subscription(self):\n try:\n # check if subscription exists\n sub = self.client.get_subscription(subscription=self.subscription_path)\n\n except NotFound:\n self._create_subscription()\n\n else:\n self.topic_path = sub.topic\n print(f\"Subscription exists: {self.subscription_path}\")\n print(f\"Connected to topic: {self.topic_path}\")", "def __initSubscribers(self):\n\t\t\n\t\t# Drone estimated pose (from FCU)\n\t\tself.__subs['pose'] = MySubscriber('mavros/local_position/pose',\n\t\t\t\t\t\t\t\t\t\t\tPoseStamped)\n\t\t\n\t\t# Drone state (connected, armed, mode)\n\t\tself.__subs['state'] = MySubscriber('mavros/state',\n\t\t\t\t\t\t\t\t\t\t\tState)", "def subscribe(self, feed, **args):\n args.update(feed=feed)\n return self.fetch(\"/subscribe\", post_args=args)", "def __init__(self, name, subscriptions=None, note=''):\n self.name = name\n self.note = note\n if subscriptions is None:\n subscriptions = []\n try:\n self.subscriptions = json.dumps(subscriptions)\n except json.JSONDecodeError:\n self.subscriptions = '[]'\n finally:\n subscriptions = {subscription['uid']: subscription['events'] for subscription in subscriptions}\n if 'controller' in subscriptions:\n subscriptions['controller'] = convert_from_event_names(subscriptions['controller'])\n core.case.subscription.add_cases({name: subscriptions})", "def subscribe(self, channel, **kwargs):\n pass", "def subscribe_topic(self):\n req = {\n \"op\": \"subscribe\",\n \"args\": [\n \"instrument\",\n \"trade\",\n \"orderBook10\",\n \"execution\",\n \"order\",\n \"position\",\n \"margin\",\n ],\n }\n self.send_packet(req)", "def _subscribe(self, sub_type: str, sub_version: str, condition: dict, callback) -> str:\n self.__logger.debug(f'subscribe to {sub_type} version {sub_version} with condition {condition}')\n data = {\n 'type': sub_type,\n 'version': sub_version,\n 'condition': condition,\n 'transport': {\n 'method': 'webhook',\n 'callback': f'{self.callback_url}/callback',\n 'secret': self.secret\n }\n }\n r_data = self.__api_post_request(TWITCH_API_BASE_URL + 'eventsub/subscriptions', data=data)\n result = r_data.json()\n error = result.get('error')\n if r_data.status_code == 500:\n raise TwitchBackendException(error)\n if error is not None:\n if error.lower() == 'conflict':\n raise EventSubSubscriptionConflict(result.get('message', ''))\n raise EventSubSubscriptionError(result.get('message'))\n sub_id = result['data'][0]['id']\n self.__add_callback(sub_id, callback)\n if self.wait_for_subscription_confirm:\n timeout = datetime.datetime.utcnow() + datetime.timedelta(\n seconds=self.wait_for_subscription_confirm_timeout)\n while timeout >= datetime.datetime.utcnow():\n if self.__callbacks[sub_id]['active']:\n return sub_id\n asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.01))\n self.__callbacks.pop(sub_id, None)\n raise EventSubSubscriptionTimeout()\n return sub_id", "def __call__(\n self,\n request: pubsub.Subscription,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> pubsub.Subscription:\n\n http_options: List[Dict[str, str]] 
= [\n {\n \"method\": \"put\",\n \"uri\": \"/v1/{name=projects/*/subscriptions/*}\",\n \"body\": \"*\",\n },\n ]\n request, metadata = self._interceptor.pre_create_subscription(\n request, metadata\n )\n pb_request = pubsub.Subscription.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n # Jsonify the request body\n\n body = json_format.MessageToJson(\n transcoded_request[\"body\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n data=body,\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n resp = pubsub.Subscription()\n pb_resp = pubsub.Subscription.pb(resp)\n\n json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)\n resp = self._interceptor.post_create_subscription(resp)\n return resp", "def create_subscription(self,\n body):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/subscriptions')\n .http_method(HttpMethodEnum.POST)\n .header_param(Parameter()\n .key('Content-Type')\n .value('application/json'))\n .body_param(Parameter()\n .value(body))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .body_serializer(APIHelper.json_serialize)\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()", "def subscribe(self, subscription_type, callback):\n if subscription_type in self._subscriptions.keys():\n self._subscriptions[subscription_type].append(callback)", "def create_subscription(connection, project_id, body, fields=None, error_msg=None):\n return connection.post(\n url=f'{connection.base_url}/api/subscriptions',\n params={'fields': fields},\n headers={'X-MSTR-ProjectID': project_id},\n json=body,\n )", "def subscription_factory_fixture():\n def _factory(capability):\n sub = Subscription()\n sub.capability = capability\n return sub\n return _factory", "def create_subscription(chid, use_time=False, use_ctrl=False,\n mask=None, callback=None):\n mask = mask or DEFAULT_SUBSCRIPTION_MASK\n\n ftype = promote_type(chid, use_ctrl=use_ctrl, use_time=use_time)\n\n uarg = ctypes.py_object(callback)\n evid = ctypes.c_void_p()\n poll()\n ret = libca.ca_create_subscription(ftype, 0, chid, mask,\n _CB_EVENT, uarg, ctypes.byref(evid))\n PySEVCHK('create_subscription', ret)\n\n poll()\n return (_CB_EVENT, uarg, evid)", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscribe(self, subscriber):\n self.subscribers.append(subscriber)", "def test_subscription(self):\n self.token_login()\n 
cassette_name = self.cassette_name(\"subscription\")\n with self.recorder.use_cassette(cassette_name):\n repository = self.gh.repository(\"sigmavirus24\", \"github3.py\")\n threads = list(repository.notifications(all=True))\n assert len(threads) > 0\n thread = threads[0]\n assert isinstance(thread, github3.notifications.Thread)\n assert isinstance(\n thread.subscription(),\n github3.notifications.ThreadSubscription,\n )", "def subscribe(self):\n fd = libplasma.subscribe(self.conn)\n self.notification_sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM)\n # Make the socket non-blocking.\n self.notification_sock.setblocking(0)", "def to_subscription_instance_11(self):\n subscription_params = tm11.SubscriptionParameters(response_type=self.response_type,\n content_bindings=[str(x) for x in\n self.supported_content.all()])\n\n if self.query:\n subscription_params.query = self.query.to_query_11()\n\n push_params = None # TODO: Implement this\n poll_instances = None # TODO: Implement this\n si = tm11.SubscriptionInstance(subscription_id=str(self.subscription_id),\n status=self.status,\n subscription_parameters=subscription_params,\n push_parameters=push_params,\n poll_instances=poll_instances)\n return si", "def poll(self):\n log.info(\"==>\")\n # TODO exception\n self.put_event(self.SubscriptionEvent.SAMPLE)\n log.info(\"<==\")", "def create_subscription(self, user, standard):\r\n\r\n subscription = self.create(\r\n user=user,\r\n standard=standard,\r\n )\r\n\r\n return subscription", "def __init__(self, r, handlers):\n\t\tthreading.Thread.__init__(self)\n\t\tself.redis = r\n\t\tself.pubSub = self.redis.pubsub()\n\t\tself.handlers = handlers\n\t\tchannels = []\n\t\tfor k, v in self.handlers.items():\n\t\t\tchannels.append(k)\n\t\tself.pubSub.subscribe(channels)\n\t\tlog.info(\"Subscribed to redis pubsub channels: {}\".format(channels))", "def subscribe(self, sub, chan, auth=\"\", cipher=\"\", use_ssl=False):\r\n self.sub = sub\r\n self.chan = chan\r\n self.auth = auth\r\n self.cipher = cipher\r\n self.use_ssl = use_ssl\r\n\r\n # force disconnect of currently active longpoll.\r\n self.hup()", "def on_subscribe( client, userdata, mid, granted_qos ):\n logging.info( \"Topic successfully subcribed with QoS: %s\" %granted_qos )", "def test_issue_subscriptions(self):\n pass", "def subject(\n subscribe: Subscribe, on_next: NextHandler, on_error: ErrorHandler = default_error, on_completed: CompleteHandler = default_on_completed\n) -> Subject:\n return SubjectDefinition(subscribe=subscribe, on_next=on_next, on_error=on_error, on_completed=on_completed)", "def subscribe(self):\n subscriber = pubsub.SubscriberClient()\n subscription_path = subscriber.subscription_path(self.project_id,\n self.subscription_name)\n\n def callback(message):\n \"\"\"\n Address events in pubsub by sequential procedures.\n Load the model mapped to the events.\n Do label prediction.\n Add labels if the confidence is enough.\n Args:\n Object:\n message =\n Message {\n data: b'New issue.',\n attributes: {\n 'installation_id': '10000',\n 'repo_owner': 'kubeflow',\n 'repo_name': 'examples',\n 'issue_num': '1'\n }\n }\n \"\"\"\n installation_id = message.attributes['installation_id']\n repo_owner = message.attributes['repo_owner']\n repo_name = message.attributes['repo_name']\n issue_num = message.attributes['issue_num']\n logging.info(f'Receive issue #{issue_num} from {repo_owner}/{repo_name}')\n\n try:\n # predict labels\n self.load_yaml(repo_owner, repo_name)\n self.download_model_from_gcs()\n predictions, issue_embedding = 
self.predict_labels(repo_owner, repo_name, issue_num)\n self.add_labels_to_issue(installation_id, repo_owner, repo_name,\n issue_num, predictions)\n\n # log the prediction, which will be used to track the performance\n log_dict = {\n 'repo_owner': repo_owner,\n 'repo_name': repo_name,\n 'issue_num': int(issue_num),\n 'labels': predictions['labels']\n }\n logging.info(log_dict)\n\n except Exception as e:\n # hard to find out which errors should be handled differently (e.g., retrying for multiple times)\n # and how to handle the error that the same message causes for multiple times\n # so use generic exception to ignore all errors for now\n logging.error(f'Addressing issue #{issue_num} from {repo_owner}/{repo_name} causes an error')\n logging.error(f'Error type: {type(e)}')\n logging.error(e)\n\n # acknowledge the message, or pubsub will repeatedly attempt to deliver it\n message.ack()\n\n # limit the subscriber to only have one outstanding message at a time\n flow_control = pubsub.types.FlowControl(max_messages=1)\n future = subscriber.subscribe(subscription_path,\n callback=callback,\n flow_control=flow_control)\n try:\n logging.info(future.result())\n except KeyboardInterrupt:\n logging.info(future.cancel())", "async def subscribe(self, topic: str, callback: aiowamp.SubscriptionHandler, *,\n match_policy: aiowamp.MatchPolicy = None,\n node_key: str = None,\n options: aiowamp.WAMPDict = None) -> int:\n ...", "def subscribeConsumer(consumer):", "def __init__(self, topic_name, topic_type, wait_for_data=False):\n\n self.data = None\n self.lock = Lock()\n \n self.subscriber = rospy.Subscriber(topic_name, topic_type, self.callback)\n self.get(wait_for_data)", "def subscribe(self, namespace, sub_strings=None):\n req = JSONRPCRequest('subscribe', [namespace, sub_strings])\n result = yield self._send(req)\n self._cache_jsonrpc_request(req)\n raise tornado.gen.Return(result)", "def test_subscriber(self) -> None:\n stream_name = gather_subscriptions(self.user_profile)[0][0][\"name\"]\n self.make_successful_subscriber_request(stream_name)", "def post_get_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def pubsub(self, **kwargs):\n if not self._pubsub:\n self._pubsub = Pubsub(self, **kwargs)\n return self._pubsub", "def enable_subscription():\n client = KConsumer(config=subscriber_config)\n counter = 0\n while 1:\n data = client.consume()\n if data:\n print(\"Received Data\", counter)\n class_label = inference_on_data(data.value)\n publish_response(class_label)", "def set_pub_sub(self):\n\n # Set trap check service client\n self.trap_cheq_srv = rospy.ServiceProxy(\"check_for_trap\", TrapCheck)\n\n # Set mix initiave controller output\n self.mix_cmd_pub = rospy.Publisher(\"mix_cmd\", Bool, queue_size=50)\n\n # Set agent TS state subscriber\n rospy.Subscriber(\"ts_state\", TransitionSystemStateStamped, self.ts_state_callback, queue_size=50)\n\n # Set human input planner\n rospy.Subscriber(\"key_cmd\", Bool, self.teleop_cmd_callback, queue_size=50)\n\n # Set planner input subscriber\n rospy.Subscriber(\"planner_cmd\", Bool, self.planner_cmd_callback, queue_size=50)", "def subscribe(self, request: Request) -> Response:\n ids = request.data.get(\"ids\", None)\n session_id = request.data.get(\"session_id\")\n content_type = ContentType.objects.get_for_model(self.get_queryset().model)\n user = request.user if request.user.is_authenticated else get_anonymous_user()\n subscription = Subscription.objects.create(user=user, session_id=session_id)\n\n if ids is 
None:\n # Subscribe to the whole table.\n subscription.subscribe(\n content_type, [Observer.ALL_IDS], (ChangeType.CREATE, ChangeType.DELETE)\n )\n else:\n # Verify all ids exists and user has permissions to view them.\n for id in ids:\n if not self.user_has_permission(id, request.user):\n raise NotFound(f\"Item {id} does not exist\")\n\n change_types = (ChangeType.UPDATE, ChangeType.DELETE, ChangeType.CREATE)\n subscription.subscribe(content_type, ids, change_types)\n\n resp = {\"subscription_id\": subscription.subscription_id}\n return Response(resp)", "def subscribe(self):\n if hasattr(self.bus, \"signal_handler\"):\n self.bus.signal_handler.subscribe()\n if hasattr(self.bus, \"console_control_handler\"):\n self.bus.console_control_handler.subscribe()", "def test_subscribe(self):\n dest = '/topic/dest'\n\n self.tm.subscribe(self.conn, dest)\n f = Frame(frames.MESSAGE, headers={'destination': dest}, body='Empty')\n self.tm.send(f)\n\n self.assertEqual(len(self.conn.frames), 1)\n subscription = self.conn.frames[0].headers.pop(\"subscription\", None)\n self.assertEqual(subscription, 0)\n self.assertEqual(self.conn.frames[0], f)", "def subscribeToEvent(eventName,subscriber,msgInterface):", "def test_issue_add_subscription(self):\n pass", "def create_subscriber(self, exchange_name=None, callback=None):\n\n if not exchange_name:\n #@todo - remove this! it does not belong here!\n\n # if not create a new one based on the process id\n exchange_name = '%s_subscriber_%d' % (self.process.id, self._subscriber_cnt)\n self._subscriber_cnt += 1\n\n # create an XN\n xn = self.container.ex_manager.create_xn_queue(exchange_name)\n\n # create an XP\n # xp = self.container.ex_manager.create_xp(self.xp_base)\n # bind it on the XP\n # xn.bind(exchange_name, xp)\n\n return StreamSubscriber(from_name=xn, process=self.process, callback=callback, node=self.container.node)", "def on_subscribe(self, client, userdata, mid, granted_qos):\n\t\tprint (\"[{}] Client subscribed to {}\".format(\n\t\t\tint(time.time()),\n\t\t\tself.topic\n\t\t))\n\t\t#the following lines are here and not in on_connect() only for printing purpose\n\t\tif not self.printed_sub:\n\t\t\tself.printed_sub = True\n\t\t\tself.subscribe(\"measure/people\")", "def subscribe(self, topic):\n\t\tself.topic=topic\n\t\tself.client.subscribe(self.topic)", "def __init__(__self__,\n resource_name: str,\n args: EventSubscriptionArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def set_subscription(self, value):\n self.pub_socket.setsockopt(zmq.SUBSCRIBE, value)", "def subscription(self) -> SubscriptionServiceProxy:\n if self._subscription_services is None:\n self._subscription_services = SubscriptionServiceProxy(self)\n return self._subscription_services", "def test_update_subscription(self):\n pass", "async def subscribe(self, callback: Callable=None):\n LOGGER.info('Subscription added')\n await self._ros.send(self._subscribe_msg)", "async def create_and_subscribe(user_id):\n client = gql(\n query=Query,\n mutation=Mutation,\n subscription=Subscription,\n consumer_attrs={\"strict_ordering\": True, \"confirm_subscriptions\": True},\n )\n await client.connect_and_init()\n\n sub_id = await client.send(\n msg_type=\"start\",\n payload={\n \"query\": textwrap.dedent(\n \"\"\"\n subscription op_name($user_id: UserId) {\n on_chat_message_sent(user_id: $user_id) { event }\n }\n \"\"\"\n ),\n \"variables\": {\"user_id\": user_id},\n \"operationName\": \"op_name\",\n },\n )\n\n # Receive the subscription confirmation message.\n resp = await 
client.receive(assert_id=sub_id, assert_type=\"data\")\n assert resp == {\"data\": None}\n\n return sub_id, client", "def subscribePost() -> object:\n log = logging.getLogger(__name__)\n db = Db()\n\n body = request.get_json()\n\n if body is None:\n return jsonify({\"error\": \"json body is required\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('datasetId') in body:\n return jsonify({\"error\": \"datasetId is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('notificationUrl') in body:\n return jsonify({\"error\": \"notificationUrl is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n\n subscription = db.Subscriptions(\n datasetId=body['datasetId'],\n notificationUrl=body['notificationUrl']\n )\n\n subscription.save()\n\n subscription = json.loads(subscription.to_json())\n subscription['id'] = subscription['_id'][\"$oid\"]\n subscription.pop(\"_id\")\n log.debug(\"subscription created\")\n\n return jsonify(subscription), HTTPStatus.CREATED", "def get_subscription(self):\n return self.request({\n 'path': '/' + UUID + '/subscription'})", "def __init__(self, data):\n super(OptionsChainSubscriptionCreate, self).__init__()\n self.data = data", "def subscribe(self, transport, data):\r\n\r\n self.add(transport, address=data.get('hx_subscribe'))\r\n\r\n self.send(\r\n data.get('hx_subscribe'),\r\n {'message': \"%r is listening\" % transport}\r\n )", "def _on_connect(self, client, userdata, flags, rc):\n self.subscribe(self.topic)", "def subscription(self):\n return self._subscription", "def subscription(self):\n return self._subscription", "def subscribe(self, callback=None, max_flow=100):\n # noinspection PyArgumentList\n flow_control = FlowControl(max_messages=max_flow)\n subscription = self.subscriber_client.subscribe(\n self.subscription_path,\n callback=callback,\n flow_control=flow_control\n )\n return subscription", "def _async_track_subscription(self, subscription: Subscription) -> None:\n if _is_simple_match(subscription.topic):\n self._simple_subscriptions.setdefault(subscription.topic, []).append(\n subscription\n )\n else:\n self._wildcard_subscriptions.append(subscription)", "def subscribe(self,\n subscription_name: str) -> pubsub_futures.StreamingPullFuture:\n self._possibly_subscribing = True\n return self._subscriber.subscribe(subscription_name, self._process_message)", "def subscribe( self, topic ):\n logging.info( \"Subscribing to topic %s\" %topic )\n try:\n self.client.subscribe( topic )\n except Exception as error:\n print( error )" ]
[ "0.70819134", "0.70819134", "0.70819134", "0.7014957", "0.6897221", "0.6896531", "0.68738085", "0.68679523", "0.68197066", "0.6760519", "0.6746346", "0.6731105", "0.6678667", "0.6670081", "0.6621244", "0.6589251", "0.6571883", "0.654773", "0.6535721", "0.6527079", "0.65072304", "0.6408566", "0.64016026", "0.64016026", "0.63647634", "0.63580877", "0.6355541", "0.6348653", "0.63423693", "0.63217884", "0.6299407", "0.6233599", "0.62308466", "0.6228413", "0.6204658", "0.6197256", "0.61925995", "0.61879337", "0.61744106", "0.61626923", "0.6156773", "0.6127372", "0.61263126", "0.6121816", "0.6118362", "0.61064273", "0.61051786", "0.60915786", "0.6084932", "0.6075821", "0.6074306", "0.6072864", "0.6055424", "0.60476243", "0.60439116", "0.6042745", "0.60401666", "0.6038047", "0.6031595", "0.601731", "0.60149145", "0.59916365", "0.59747994", "0.59710795", "0.59705687", "0.5954035", "0.5953676", "0.59420264", "0.5940797", "0.59355533", "0.5924545", "0.59177244", "0.5909718", "0.5902339", "0.5901729", "0.5900357", "0.5891916", "0.5890217", "0.58798325", "0.5872925", "0.5856387", "0.5850297", "0.58502966", "0.5845542", "0.5844613", "0.58436275", "0.58419335", "0.58327466", "0.58318985", "0.5825542", "0.58249384", "0.58181036", "0.5808004", "0.5804756", "0.5779291", "0.5767639", "0.5767639", "0.5765434", "0.5763639", "0.5754495", "0.57471484" ]
0.0
-1
Wait for message to arrive and return it. Blocks if there is no message available.
def consume(self, block=True, timeout=0): # We need to repeat this step, because there may be messages # that expired, and we expect this method to always return message. while True: # Retrieve last message ID if block: # Blocking query, wait until message is available. message_id = self.pyrps.redis.blpop(self.pyrps._ns_queue(self.queue, self.consumer_id), timeout) # blpop returns tuple(key, value), we need only value. message_id = message_id[1] else: # Non blocking query. Return if there is no message in queue. message_id = self.pyrps.redis.lpop(self.pyrps._ns_queue(self.queue, self.consumer_id)) # If there is no message in the queue, return None. if message_id is None: return None # Retrieve the message message = self.pyrps.redis.get(self.pyrps._ns_message(self.queue, message_id)) # If message still exists (no TTL has been reached), return it. if message is not None: return message
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait_for_messages(self):\n msg = self.inbox.get()\n return msg", "def wait_message(self):\n if self._state != states['open']:\n return False\n if len(self._read_queue) > 0:\n return True\n\n assert self._read_waiter is None or self._read_waiter.cancelled(), \\\n \"You may only use one wait_message() per connection.\"\n\n self._read_waiter = asyncio.Future(loop=self._loop)\n yield from self._read_waiter\n return self.wait_message()", "def wait_for_message(self, tag, timeout=None):\n def done_check():\n if self._message_queue.setdefault(tag,[]):\n value=heapq.heappop(self._message_queue[tag])[-1]\n return True,value\n return False,None\n return self._wait_in_process_loop(done_check,timeout=timeout)", "def receive_message(self):\n return self.receive_message_queue.get()", "def _wait_for_message(self, expectedmessage, timeout):\r\n \r\n # Parse the first part (the message) from the expected reply. We need\r\n # to do this, because parts of some replies will contain parameters\r\n # that differ between replies.\r\n if '_' in expectedmessage:\r\n expected = copy.copy(expectedmessage[:expectedmessage.find('_')])\r\n else:\r\n expected = copy.copy(expectedmessage)\r\n \r\n self._print(\"%s: Waiting for message '%s'\" % (self._clientnr, expected))\r\n \r\n # Wait for a message or a timeout.\r\n t0 = time.time()\r\n last_attempt = time.time()\r\n no_message = True\r\n no_timeout = True\r\n while no_message and no_timeout:\r\n # Get the current message queue.\r\n cmds = copy.deepcopy(self._incoming)\r\n # Loop through the queue.\r\n for msg in cmds:\r\n self._print(\"%s: Examining message '%s'\" % \\\r\n (self._clientnr, msg))\r\n\r\n # Check if the message is the expected message.\r\n if expected in msg:\r\n # Remove the message from the queue.\r\n self._incominglock.acquire()\r\n self._incoming.pop(self._incoming.index(msg))\r\n self._incominglock.release()\r\n no_message = False\r\n break\r\n else:\r\n self._print(\"Message ('%s') was not expected ('%s')\" \\\r\n % (msg, expectedmessage))\r\n\r\n # Check if there is a timeout.\r\n if timeout != None:\r\n if time.time() - t0 > timeout:\r\n no_timeout = False\r\n break\r\n \r\n # Check if we should re-send the message.\r\n if time.time() - last_attempt > self._reptimeout:\r\n # Let the server know what we're expecting from them.\r\n self._wait_what(expected)\r\n # Update the last attempt time.\r\n last_attempt = time.time()\r\n\r\n # Return a success Boolean and the message/fault.\r\n if no_message == False:\r\n return (True, msg)\r\n if no_timeout == False:\r\n return (False, 'timeout')\r\n return (False, 'unknown')", "def wait_until_new_message(self):\n last_message_id = self.get_loaded_messages()[-1].get_attribute(\"data-id\")\n while True:\n try:\n new_message = self.get_loaded_messages()[-1]\n if last_message_id != new_message.get_attribute(\"data-id\"):\n return new_message\n else:\n continue\n except:\n print(\"Error encountered (0x001)\")\n continue", "def wait_message(self, message=None):\r\n if not self.mw.winfo_exists():\r\n return\r\n\r\n if not self.display_game:\r\n return\r\n \r\n self.waiting_for_message = True\r\n if message is None:\r\n message = self.cur_message\r\n if (message is not None\r\n and message.end_time is not None):\r\n while True:\r\n now = datetime.now()\r\n if now >= message.end_time:\r\n self.cur_message = None\r\n SlTrace.lg(\"End of message waiting\", \"message\")\r\n break\r\n if self.mw is not None and self.mw.winfo_exists():\r\n self.mw.update()\r\n self.mw.after(int((message.end_time-now)*1000)) # 
rather than loop time.sleep(.01)\r\n if self.cur_message is not None:\r\n self.cur_message.destroy()\r\n self.cur_message = None\r\n self.waiting_for_message = False", "async def poll_message(self):\n message_cache = self.message_cache\n if (message_cache is not None) and message_cache:\n return message_cache.pop()\n \n if not self.message_request_more:\n return\n \n message_cache = await self.client.message_get_chunk(self.source_channel, after = self.last_message_id)\n self.message_cache = message_cache\n \n if len(message_cache) < 100:\n self.message_request_more = False\n \n if message_cache:\n return message_cache.pop()", "def get_msg(self, block=True, timeout=None):\n return self.in_queue.get(block, timeout)", "def get_msg(self, block=True, timeout=None):\n return self.in_queue.get(block, timeout)", "def message(self):\n self.wait()\n return self._message", "def read(self):\r\n assert self.status in (WAIT_LEN, WAIT_MESSAGE)\r\n if self.status == WAIT_LEN:\r\n self._read_len()\r\n # go back to the main loop here for simplicity instead of\r\n # falling through, even though there is a good chance that\r\n # the message is already available\r\n elif self.status == WAIT_MESSAGE:\r\n read = self.socket.recv(self.len - len(self.message))\r\n if len(read) == 0:\r\n logging.error(\"can't read frame from socket (get %d of %d bytes)\" %\r\n (len(self.message), self.len))\r\n self.close()\r\n return\r\n self.message += read\r\n if len(self.message) == self.len:\r\n self.status = WAIT_PROCESS", "def wait_for_message(topic, topic_class, timeout):\n msg = None\n try:\n msg = rospy.wait_for_message(topic, topic_class, timeout)\n except rospy.exceptions.ROSException as e:\n rospy.loginfo(e)\n msg = None\n return msg", "async def Read(self) -> Optional[Message]:\n return await self._read_queue.Get()", "async def fresp(self):\r\n self.waiting = True\r\n while self.waiting:\r\n try:\r\n msg = await bot.wait_for_message(author=self.author, check=self.ccheck)\r\n if self.active and not self.ended:\r\n self.waiting = False\r\n return (msg.content)\r\n elif self.ended:\r\n self.waiting = False\r\n raise CommandEndedError\r\n else:\r\n pass\r\n except discord.HTTPException:\r\n pass", "def _wait_ready(self):\n command = self._recv_from_client()\n while command != \"READY\":\n command = self._client.recv_from_client()", "def asyncRead(self, timeout=5000, staging=False, attr=None):\n if self.isConnected():\n turns = 0\n while True:\n turns += 1\n if turns > 100:\n logging.debug(\"Timeout on read after 100 iterations\")\n return None\n\n result = True\n logging.debug(\"MessageOutput size: %i\", len(self.messageOutput))\n if len(self.messageOutput) == 0 and not self.containsAttr(attr):\n logging.debug(\"Waiting for new message.\")\n if not staging:\n # spy = QSignalSpy(self._stopWaiting)\n spy = QSignalSpy(self.bufferReady)\n else:\n spy = QSignalSpy(self._staging)\n result = spy.wait(timeout) # Asynchronous wait, Timeout 5s\n\n if result and not self._hasError:\n self.lock.acquire()\n if len(self.messageOutput) == 0:\n self.lock.release()\n logging.debug(\"Race condition triggered. 
Wait for next message.\")\n continue\n found = False\n result = self.messageOutput[0]\n if attr is not None:\n for msg in self.messageOutput:\n if attr[0] in msg:\n if attr[1] == msg[attr[0]]:\n found = True\n result = msg\n break\n if found or attr is None:\n del self.messageOutput[self.messageOutput.index(result)]\n logging.debug(\"MessageOutput size: %i\", len(self.messageOutput))\n self.lock.release()\n\n if \"error\" not in result:\n result[\"error\"] = []\n if \"status\" not in result:\n result[\"status\"] = True\n\n return result\n else:\n logging.debug(\"Message not found. Release of lock.\")\n if attr is not None:\n logging.debug(\"Miss '%s' with value '%s'\", str(attr[0]), str(attr[1]))\n self.lock.release()\n self.bufferReady.emit()\n qApp.processEvents()\n else:\n logging.debug(\"Nothing to read.\")\n break\n else:\n logging.debug(\"Not connected. Did not read.\")\n return None", "def send_and_get_reply(self, message):\n future = self.send_message(message)\n while not future.done():\n self.recv_messages()\n\n return future.result()", "def read(self):\n assert self.status in (WAIT_LEN, WAIT_MESSAGE)\n\n if self.status == WAIT_LEN:\n self._read_len()\n # go back to the main loop here for simplicity instead of\n # falling through, even though there is a good chance that\n # the message is already available\n elif self.status == WAIT_MESSAGE:\n read = self.socket.recv(self.len - len(self.message))\n if len(read) == 0:\n logging.error(\"can't read frame from socket\" +\n \" (got %d of %d bytes)\" %\n (len(self.message), self.len))\n self.close()\n return\n self.message += read\n if len(self.message) == self.len:\n self._set_status(WAIT_PROCESS)", "def _execute(self):\n LOG.info(\"Waiting for a message...\")", "def wait_for_any_message(self, timeout=None):\n self._wait_in_process_loop(lambda: (True,None),timeout=timeout)", "def ecute(self):\n msg = self.up_queue_recv_socket.recv()\n result, e = self.up_queue.get()\n if e is not None:\n raise e\n return result", "async def read(self) -> None:\n make_non_blocking(self.stream)\n\n while not self.stream.closed:\n message = None\n try:\n message = await self.read_one()\n\n if not message:\n await self.sleep()\n continue\n else:\n self.wake()\n\n IOLoop.current().add_callback(self.queue.put_nowait, message)\n except Exception as e: # pragma: no cover\n self.log.exception(\n \"%s couldn't enqueue message: %s (%s)\", self, message, e\n )\n await self.sleep()", "def wait(self):\n self.stream.read_until(\"\\n\", self._on_read)", "async def get(self, timeout: Optional[float] = None) -> Optional[Data]:\n completed: bool\n async with self.read_mutex:\n if timeout is not None and timeout <= 0:\n if not self.message_complete.is_set():\n return None\n if self.get_in_progress:\n # This should be guarded against with the read_mutex,\n # exception is only here as a failsafe\n raise ServerError(\n \"Called get() on Websocket frame assembler \"\n \"while asynchronous get is already in progress.\"\n )\n self.get_in_progress = True\n\n # If the message_complete event isn't set yet, release the lock to\n # allow put() to run and eventually set it.\n # Locking with get_in_progress ensures only one task can get here.\n if timeout is None:\n completed = await self.message_complete.wait()\n elif timeout <= 0:\n completed = self.message_complete.is_set()\n else:\n try:\n await asyncio.wait_for(\n self.message_complete.wait(), timeout=timeout\n )\n except asyncio.TimeoutError:\n ...\n finally:\n completed = self.message_complete.is_set()\n\n # Unpause the 
transport, if its paused\n if self.paused:\n self.protocol.resume_frames()\n self.paused = False\n if not self.get_in_progress: # no cov\n # This should be guarded against with the read_mutex,\n # exception is here as a failsafe\n raise ServerError(\n \"State of Websocket frame assembler was modified while an \"\n \"asynchronous get was in progress.\"\n )\n self.get_in_progress = False\n\n # Waiting for a complete message timed out.\n if not completed:\n return None\n if not self.message_complete.is_set():\n return None\n\n self.message_complete.clear()\n\n joiner: Data = b\"\" if self.decoder is None else \"\"\n # mypy cannot figure out that chunks have the proper type.\n message: Data = joiner.join(self.chunks) # type: ignore\n if self.message_fetched.is_set():\n # This should be guarded against with the read_mutex,\n # and get_in_progress check, this exception is here\n # as a failsafe\n raise ServerError(\n \"Websocket get() found a message when \"\n \"state was already fetched.\"\n )\n self.message_fetched.set()\n self.chunks = []\n # this should already be None, but set it here for safety\n self.chunks_queue = None\n return message", "def getMsg(self, time=None, msg=\"\"):\n\t\ttry:\n\t\t\tr = self._queue.get(True, time)\n\t\texcept Empty:\n\t\t\traise TimeoutException(\"%s.getMsg(timeout=%s,msg=%s)\"%(self,time,msg))\n\t\telse:\n\t\t\tself._queue.task_done()\n\t\t\treturn r", "def read(self):\n if not self._consuming:\n yield from self._begin_consuming()\n return (yield from self._message_queue.get())", "def receive(self):\n if self.sock is not None:\n return recv_msg(self.sock)\n return None", "def receive_message(self):\n try:\n self.clockCheckStop = datetime.now()\n data = self.listener.recvfrom(BUF_SZ)\n return fxp_bytes_subscriber.unmarshal_message(data[0])\n except ConnectionError as err:\n # a ConnectionError means it will never succeed\n print('closing: {}'.format(err))\n return\n except Exception as err:\n # other exceptions we assume are due to being non-blocking;\n # we expect them to succeed in future\n print('failed {}'.format(err))\n return", "def __get_message(self):\r\n if self.is_connected():\r\n try:\r\n message = self.__socket.recv(Communicator.BUFFER_SIZE).decode()\r\n # Don't register again - the remote host is closed. 
Close app.\r\n if len(message) == 0:\r\n self.__root.destroy()\r\n return\r\n if self.__bound_func is not None:\r\n self.__bound_func(message)\r\n except socket.error:\r\n pass\r\n self.__root.after(self.WAIT_PERIOD, self.__get_message)", "def recv(self) -> Optional[bytes]:\n ready, _, _ = select.select([self.socket], [], [], 0)\n if len(ready) != 0:\n new_bytes = self.socket.recv(self.BUFFER_SIZE)\n self.__recv_buffer = self.__recv_buffer + new_bytes\n return self.__parse_one_message()", "def wait(self):\n try:\n buf = os.read(self._fd, 8)\n return struct.unpack(\"Q\", buf)[0]\n except OSError as e:\n if e.errno == errno.EAGAIN:\n return 0\n else:\n raise e", "def wait_for_reply(message, reply_type=None, timeout=None, bus=None):\n auto_close = bus is None\n bus = bus or get_mycroft_bus()\n if isinstance(message, str):\n try:\n message = json.loads(message)\n except:\n pass\n if isinstance(message, str):\n message = Message(message)\n elif isinstance(message, dict):\n message = Message(message[\"type\"],\n message.get(\"data\"),\n message.get(\"context\"))\n elif not isinstance(message, Message):\n raise ValueError\n response = bus.wait_for_response(message, reply_type, timeout)\n if auto_close:\n bus.close()\n return response", "def non_blocking_get(self):\n try:\n return self.q.get(block=False)\n except queue.Empty:\n time.sleep(0)\n return None", "def next(self): # wait for 5 minutes after sending message\n if self.queue:\n messages = self.queue.get_messages(1,visibility_timeout=self.visibility_timeout)\n if messages:\n for m in messages:\n return m\n raise StopIteration", "def wait_till_read_out():\n\n\trespond = send_command('waitreadout')", "def get(self) -> Optional[Message]:\n self._recv()\n if not self.inq:\n return None\n return self.inq.popleft()", "def wait (self, seconds=0.0):\r\n\t\tstart_time = time.time()\r\n\t\twhile time.time() < start_time + seconds:\r\n\t\t\tself.receive()", "def get(self, block=True, timeout=None):\n if block and timeout is None:\n self.message = self.handle.read(wait_time_seconds=20)\n while self.message is None:\n self.message = self.handle.read(wait_time_seconds=20)\n elif block and 1 <= timeout <= 20:\n self.message = self.handle.read(wait_time_seconds=timeout)\n elif not block and timeout is None:\n self.message = self.handle.read(wait_time_seconds=0)\n else:\n raise Exception('invalid arguments')\n if self.message is None:\n raise Empty\n return self.message.get_body()", "def get(self, message = None, wait = 2):\n # Verify if sonar is initialized\n if not self.initialized:\n raise SonarNotConfigured\n\n expected_name = Message.to_string(message)\n if message:\n rospy.logdebug(\"Waiting for %s message\", expected_name)\n else:\n rospy.logdebug(\"Waiting for unlabeled message\")\n\n # Determine end time\n end = datetime.datetime.now() + datetime.timedelta(seconds=wait)\n\n # Wait until received if a specific message ID is requested, otherwise wait forever\n while message is None or datetime.datetime.now() < end:\n if message is None:\n try:\n reply = self.conn.get_reply()\n return reply\n except:\n break\n else:\n try:\n reply = self.conn.get_reply(message)\n except:\n break\n # Verify reply ID if requested\n if reply.id == message:\n #rospy.logdebug(\"Found %s message\", expected_name)\n return reply\n else:\n rospy.logwarn(\"Received unexpected %s message\", reply.name)\n # Timeout\n rospy.logerr(\"Timed out before receiving message: %s\", expected_name)\n raise TimeoutError()", "def wait(self, signal):\n while True:\n s = self.receive()\n if s 
== signal:\n break", "def get_message_from_queue(self):\n message = None, None\n\n try:\n message = self.queue.get(block=True, timeout=3)\n except Empty:\n self.fail(msg='Queue get() failed empty')\n\n return message", "async def read_one_message(self):\n if not self.connected:\n return None\n\n try:\n header = await self.reader.readexactly(2)\n except SocketError as err:\n if err.errno == errno.ECONNRESET:\n self.log.error('Connection reset by peer')\n self.connected = False\n if err.errno == errno.EHOSTUNREACH:\n self.log.error('Spa unreachable')\n self.connected = False\n else:\n self.log.error('Spa socket error: {0}'.format(str(err)))\n return None\n except Exception as e:\n self.log.error('Spa read failed: {0}'.format(str(e)))\n return None\n\n if header[0] == M_START:\n # header[1] is size, + checksum + M_END (we already read 2 tho!)\n rlen = header[1]\n else:\n return None\n\n # now get the rest of the data\n try:\n data = await self.reader.readexactly(rlen)\n except Exception as e:\n self.log.errpr('Spa read failed: {0}'.format(str(e)))\n return None\n\n full_data = header + data\n # don't count M_START, M_END or CHKSUM (remember that rlen is 2 short)\n crc = messages.Message.crc(full_data[1:rlen - 1])\n if crc != full_data[-2]:\n self.log.error('Message had bad CRC, discarding')\n return None\n\n # self.log.error('got update: {}'.format(full_data.hex()))\n return full_data", "async def wait_for_read(self):\n if not self.socket:\n self.logger.warning(\"Wait for READ: No Socket Ready\")\n return\n loop = asyncio.get_running_loop()\n read_ev = asyncio.Event()\n fileno = self.socket.fileno()\n loop.add_reader(fileno, read_ev.set)\n try:\n await asyncio.wait_for(read_ev.wait(), self._timeout)\n finally:\n loop.remove_reader(fileno)", "def send_message_blocking(self, message):\n serial_message = create_message_string(message)\n try:\n result_msg = threads.blockingCallFromThread(reactor, self._r_send_message_and_wait, message.id, serial_message)\n if result_msg.result_code > 0:\n raise MessageHandleError(error_code=result_msg.result_code, error_details = result_msg.result)\n return result_msg.result\n except TimeoutError:\n self._waiting_messages.pop(message.id, None)\n raise", "def pop_next_message(self):\n messages = self._msq_queue.receive_messages(\n AttributeNames=['All'],\n MessageAttributeNames=['All'],\n MaxNumberOfMessages=1,\n VisibilityTimeout=self._vis_timeout,\n WaitTimeSeconds=self._wait_time,\n )\n if not messages:\n raise NoMessagesAfterLongPollingAvailableException(\"No Messages receivable from Queue\")\n elif len(messages) == 1:\n return dict_for_message(messages[0])\n else:\n raise Exception(\"Received more Messages than intended\")", "def wait(self):\n while self._worker is None:\n # wait() before self._run()\n time.sleep(0.1)\n self._worker.join()\n return self.poll()", "def recv(self):\n msg = self._mailbox.get()\n if msg is ActorExit:\n raise ActorExit()\n return msg", "def wait_for_async_data(self) -> None:\n if self.__is_active:\n self.async_read()", "def wait(self):\n with self.__lock:\n while not self.__complete:\n self.__lock.wait()", "def _get_message(self):\n if not self.opened: return None\n \n # read as much as possible\n read = 0\n try: \n chars = [b'0']\n addr = None\n logger.debug(\"%s: Socket read started...\" % \\\n self.__class__.__name__)\n while(len(chars) > 0):\n try:\n chars, addr = self._udp_socket.recvfrom(1)\n except socket.error:\n logger.debug(\"%s: ...Socket read complete.\" % \\\n self.__class__.__name__)\n break\n except socket.timeout:\n 
logger.debug(\"%s: ...Socket read complete.\" % \\\n self.__class__.__name__)\n break\n if len(chars) > 0:\n if addr not in self._rbuff:\n self._rbuff[addr] = []\n if sys.version_info[0] > 2:\n self._rbuff[addr] += chars\n else:\n self._rbuff[addr] += map(ord,chars)\n read += len(chars)\n else:\n logger.error(\"%s: ...Socket has been closed.\" % \\\n (self.__class__.__name__))\n self.close()\n return None\n logger.debug(\"%s: ...Socket read complete.\" % \\\n self.__class__.__name__)\n except Exception as ex:\n logger.error(\"%s: ...Socket read failed:\\n%s\" % \\\n (self.__class__.__name__,str(ex)))\n raise utils.TransportError \\\n (\"Socket Message get failed!\\n\" + str(ex))\n if read > 0 :\n logger.info(\"%s: Read %d bytes.\" % (self.__class__.__name__, read))\n \n # Check all Clients\n for addr in self._rbuff.keys():\n \n # Look for message start (SOH XX ~XX)\n disc = []\n while(len(self._rbuff[addr]) > 3 and (\n self._rbuff[addr][0] != messages.HorizonMessage.SOH or\n self._rbuff[addr][1] != 0xFF&(~self._rbuff[addr][2]) or\n self._rbuff[addr][1] == 0)):\n disc.append(self._rbuff[addr].pop(0))\n if len(disc) > 0:\n logger.info(\"%s: Discarded %d bytes:\\n%s\" % (\n self.__class__.__name__, len(disc), \n ' '.join(map(utils.hex,disc))))\n if len(self._rbuff[addr]) < 3:\n continue\n \n # Extract Expected Message Length\n length = self._rbuff[addr][1] + 3 \n \n # Look for next message start\n for i in range(1,len(self._rbuff[addr])-2):\n if self._rbuff[addr][i] == messages.HorizonMessage.SOH and \\\n self._rbuff[addr][1]==0xFF&(~self._rbuff[addr][2]) and \\\n self._rbuff[addr][1] != 0:\n if i < length:\n length = i\n break\n \n # Not all read yet\n if len(self._rbuff[addr]) < length:\n continue\n \n # Extract Message\n raw = self._rbuff[addr][0:length]\n self._rbuff[addr] = self._rbuff[addr][length:]\n logger.info(\"%s: Message of %d bytes found:\\n%s\" % (\n self.__class__.__name__, len(raw), \n ' '.join(map(utils.hex,raw))))\n msg = messages.HorizonMessage(version = self._version, \n payload_type = payloads.HorizonPayload,\n raw = raw, store_error = True)\n \n # update timestamp\n t = datetime.datetime.today()\n timestamp = t.microsecond/1000 + t.second*1000 + \\\n t.minute*60*1000 + t.hour*60*60*1000 + \\\n t.day*24*60*60*1000\n while timestamp > 4294967295: timestamp -= 4294967295\n \n # find connection\n for client in self._clients:\n if client.address == addr:\n client._last = timestamp\n client.route_message(msg)\n continue\n \n # new connection\n if len(self._clients) >= self._max:\n continue\n self._clients.append(HorizonTransport_Socket(\n sock = self._udp_socket,\n host = addr[0],\n port = addr[1],\n name = \"%s:%d\" % addr,\n store_timeout = 1,\n version = self._version))\n self._clients[-1].opened = True\n self._router.add_client(self._clients[-1])\n logger.info(\"%s: New connection to %s:%d.\" % \\\n (self.__class__.__name__,self._clients[-1].address[0],\n self._clients[-1].address[1]))\n client._last = timestamp\n client.route_message(msg)\n \n \n # update timestamp\n t = datetime.datetime.today()\n timestamp = t.microsecond/1000 + t.second*1000 + \\\n t.minute*60*1000 + t.hour*60*60*1000 + \\\n t.day*24*60*60*1000\n while timestamp > 4294967295: timestamp -= 4294967295\n \n # Connection Timeout?\n for i in range(len(self._clients),0,-1):\n last = self._clients[i-1].get_last_time()\n if ((timestamp - last >= self._rec_timeout) or\\\n (timestamp < last and 4294967295 - \\\n last + timestamp >= self._rec_timeout)):\n logger.warning(\"%s: Connection to %s timed-out!\" % \\\n 
(self.__class__.__name__,self._clients[i-1].name))\n self._router.remove_client(self._clients[i-1])\n self._clients[i-1].opened = False\n self._clients.remove(self._clients[i-1])\n \n return None", "def _wait_for_reply(self, message, expectedreply, timeout):\r\n \r\n # Parse the first part (the message) from the expected reply. We need\r\n # to do this, because parts of some replies will contain parameters\r\n # that differ between replies.\r\n if '_' in expectedreply:\r\n expected = copy.copy(expectedreply[:expectedreply.find('_')])\r\n else:\r\n expected = copy.copy(expectedreply)\r\n \r\n # Send the message to the server.\r\n self._msg_server(message)\r\n\r\n # Wait for the expected reply.\r\n success, reply = self._wait_for_message(expected, timeout)\r\n\r\n # Return a success Boolean and the reply/fault.\r\n return (success, reply)", "def recv(self):\n\t\tmsg = self.pb.recv()\n\n\t\tif msg.get(0) == \"timeout\":\n\t\t\tprint \"You failed to find Toby before the time ran out!\"\n\t\t\tself.cleanup()\n\t\telif msg.get(0) == \"toby\":\n\t\t\tprint \"You found Toby. Good job!\"\n\t\t\tself.cleanup()\n\t\telif msg.get(0) == \"dead\":\n\t\t\tprint \"You died!\"\n\t\t\tself.cleanup()\n\n\t\treturn msg", "def is_waiting_for_message(self):\r\n return self.waiting_for_message", "def wait(self):\n return self.bot_client.send_command(_Command.Wait)", "def _get_message(self, block=True, timeout=0.1, get_partition_info=None,\n update_offset=True):\n if self.got_error:\n raise self.error\n try:\n meta, message = self.queue.get(timeout=timeout)\n\n if update_offset:\n # Update partition offset\n self.offsets[meta.partition] = message.offset + 1\n\n # Count, check and commit messages if necessary\n self.count_since_commit += 1\n self._auto_commit()\n\n if get_partition_info is None:\n get_partition_info = self.partition_info\n if get_partition_info:\n return meta, message\n else:\n return message\n except Empty:\n return None", "async def get(\n self, *, no_ack: bool = False,\n fail: bool = True, timeout: TimeoutType = 5,\n ) -> Optional[IncomingMessage]:\n\n channel = await self.channel.get_underlay_channel()\n msg: DeliveredMessage = await channel.basic_get(\n self.name, no_ack=no_ack, timeout=timeout,\n )\n\n if isinstance(msg.delivery, aiormq.spec.Basic.GetEmpty):\n if fail:\n raise QueueEmpty\n return None\n\n return IncomingMessage(msg, no_ack=no_ack)", "def receive_message(self, timeout: Optional[float] = None) -> bool:\n self.logger.debug(\"waiting for incomming message (timeout=%f seconds)\", timeout)\n message_bytes = self.receive_bytes(timeout)\n if not message_bytes:\n self.logger.debug(\"message receive timeout\")\n return False\n\n msg_round = get_message_round(message_bytes)\n msg_type = get_message_type(message_bytes)\n msg_sender = get_message_sender(message_bytes)\n self.logger.debug(f\"{msg_type.name.lower()} message received: round={msg_round}, sender={msg_sender}\")\n\n try:\n self.verify_raw_message(message_bytes)\n self.enqueue_message(\n MessageQueueItem(\n round=msg_round,\n phase=msg_type.to_phase(),\n timestamp=self.actual_time(),\n content=message_bytes,\n )\n )\n return True\n except ValueError as e:\n self.logger.debug(e)\n return False", "def _recv(self) -> None:\n if not self.connected or now() < self.next_poll:\n return\n self.next_poll += self.poll_interval\n data = []\n while True:\n try:\n data.append(self.endpoint.recv(BUFFSIZE))\n except BlockingIOError:\n break\n if data:\n stream = io.BytesIO(b\"\".join(data))\n while True:\n try:\n info = pickle.load(stream)\n 
msg = Message(*info)\n self.inq.append(msg)\n except EOFError:\n break", "def wait(self):\n try:\n self._handler.wait()\n except Exception as e:\n raise EventHubError(\"Send failed: {}\".format(e))", "def read(self):\n if self._automata.any_message():\n msg = self._automata.get_message()\n # if there is a hello message\n # if len(self._buffer) == 0:\n # if we are not into reading a new herald message\n if to_string(msg) == to_string(HELLO_MESSAGE):\n # call the hello received callback\n if self._hello_received_callback:\n self._hello_received_callback()\n # exiting before continuing in the\n # creation of an herald message\n return None\n self._buffer.append(msg)\n if len(self._buffer) >= 8:\n res = SerialHeraldMessage(*self._buffer)\n self._buffer.clear()\n return res\n return None", "def wait(self):\n\t\twhile True:\n\t\t\tr1 = self.zaberSend(self.translation[\"hor\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tr2 = self.zaberSend(self.translation[\"ver\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tif r1[2] == 0 and r2[2] == 0:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\ttime.sleep(.01)", "def _get_message(self):\n if not self.opened: return None\n if not self._socket.is_open(): \n self.close()\n return None\n return self._socket._get_message()", "def _get_message(self):\n if not self.opened: return None\n if not self._socket.is_open(): \n self.close()\n return None\n return self._socket._get_message()", "def wait_until_finished(self) -> None:\n if not self._parent_signal_conn:\n raise ValueError(\"Process not started.\")\n if self._async_mode:\n raise RuntimeError(\"wait_until_finished should only be called in sync_mode\")\n while self._parent_signal_conn.poll(timeout=None):\n try:\n result = self._parent_signal_conn.recv()\n except EOFError:\n return\n self._process_message(result)\n if isinstance(result, DagParsingStat):\n # In sync mode (which is the only time we call this function) we don't send this message from\n # the Manager until all the running processors have finished\n return", "def recieve(self):\n\t\tif self._connected == True:\n\t\t\treturn self._sock.recv(1)", "def wait_for_response(self, message: str = None, delay_time: int = 0):\n self._num_rounds += 1\n logging.info(\n f'{self._world_name} waiting for response at round {self._num_rounds}'\n )\n if delay_time > 0:\n time.sleep(delay_time)\n self.agent.observe(\n {'id': constants.ONBOARDING_AGENT, 'text': message, 'episode_done': False}\n )\n self.messages.append(self.agent.act(timeout=self.turn_timeout))", "def call_and_wait(self, private_key, recipient_id, message, timeout):\n return self._samp_hub.callAndWait(private_key, recipient_id, message, timeout)", "def _get_message(self):\n read = 0\n try:\n \n # read as much as possible\n chars = [b'0']\n addr = None\n logger.debug(\"%s: Socket read started...\" % \\\n self.__class__.__name__)\n while(len(chars) > 0):\n try:\n chars, addr = self._socket.recvfrom(1)\n except socket.error:\n logger.debug(\"%s: ...Socket read complete.\" % \\\n self.__class__.__name__)\n break\n except socket.timeout:\n logger.debug(\"%s: ...Socket read complete.\" % \\\n self.__class__.__name__)\n break\n if len(chars) > 0 and (self._addr == None or addr == None or \n addr ==self._addr):\n \n # Encryption???\n if self._crypt != None:\n self._ubuff += chars\n if len(self._ubuff) >= self._crypt.block_size:\n logger.debug(\"%s: Decryption started...\" % \\\n self.__class__.__name__)\n \n # perform the decryption\n chars = self._crypt.decrypt(self._ubuff[:\n self._crypt.block_size])\n logger.debug(\"%s: 
...decryption complete.\" % \\\n self.__class__.__name__)\n else:\n return None\n \n if sys.version_info[0] > 2:\n self._rbuff += chars\n else:\n self._rbuff += map(ord,chars)\n read += len(chars)\n else:\n logger.error(\"%s: ...Socket has been closed.\" % \\\n (self.__class__.__name__))\n self.close()\n return None\n logger.debug(\"%s: ...Socket read complete.\" % \\\n self.__class__.__name__)\n \n # Read Failed\n except Exception as ex:\n logger.error(\"%s: ...Socket read failed:\\n%s\" % \\\n (self.__class__.__name__,str(ex)))\n raise utils.TransportError \\\n (\"Socket Message get failed!\\n\" + str(ex))\n if read > 0 :\n logger.info(\"%s: Read %d bytes.\" % (self.__class__.__name__, read))\n \n # Look for message start (SOH XX ~XX)\n disc = []\n while(len(self._rbuff) > 3 and (\n self._rbuff[0] != messages.HorizonMessage.SOH or\n self._rbuff[1] != 0xFF&(~self._rbuff[2]) or\n self._rbuff[1] == 0)):\n disc.append(self._rbuff.pop(0))\n if len(disc) > 0:\n logger.info(\"%s: Discarded %d bytes:\\n%s\" % (\n self.__class__.__name__, len(disc), \n ' '.join(map(utils.hex,disc))))\n if len(self._rbuff) < 3:\n return None\n \n # Extract Expected Message Length\n length = self._rbuff[1] + 3 \n \n # Look for next message start\n for i in range(1,len(self._rbuff)-2):\n if self._rbuff[i] == messages.HorizonMessage.SOH and \\\n self._rbuff[1] == 0xFF&(~self._rbuff[2]) and \\\n self._rbuff[1] != 0:\n if i < length:\n length = i\n break\n \n # Not all read yet\n if len(self._rbuff) < length:\n return None\n \n # Return Message\n raw = self._rbuff[0:length]\n self._rbuff = self._rbuff[length:]\n logger.info(\"%s: Message of %d bytes found:\\n%s\" % (\n self.__class__.__name__, len(raw), \n ' '.join(map(utils.hex,raw))))\n t = datetime.datetime.today()\n timestamp = t.microsecond/1000 + t.second*1000 + \\\n t.minute*60*1000 + t.hour*60*60*1000 + \\\n t.day*24*60*60*1000\n while timestamp > 4294967295: timestamp -= 4294967295\n self._last = timestamp\n return messages.HorizonMessage(payload_type = payloads.HorizonPayload,\n raw = raw, store_error = True)", "def read_message_box(timeout=default_timeout):\n start_time = time.time()\n while time.time() - start_time <= timeout:\n try:\n if _is_element_present(controls['Message Box']['Message']):\n return _get_text(controls['Message Box']['Message'])\n except:\n continue\n else:\n logger.warning(\"Could not get text of message box\")\n return None", "def _wait_for_event_in_queue(self):\n try:\n event = self._queue.get(timeout=SendTelemetryEventsHandler._MAX_TIMEOUT)\n self._queue.task_done()\n except Empty:\n # No elements in Queue, return None\n event = None\n\n return event", "async def recv(self):\n return await self.receptor.response_queue.get()", "def wait_for_event(event):\r\n return event.accept()", "def get_message(cls):\n rp = cls.get()\n try:\n message = rp.queue_send.get_nowait()\n except Exception:\n return None\n\n return message", "def wait(self, timeout=600):\n s = datetime.datetime.now()\n status = json.loads(self.get())\n while status['status'] != 'COMPLETE':\n status = self.get()\n e = datetime.datetime.now()\n if (e - s).seconds > timeout:\n raise RuntimeError('timeout')\n return status", "def _recv(self):\n result = self._con.receive()\n if result.startswith(Parser.NOT_OK_MSG) or len(result) == 0:\n return result\n while not result.endswith(Parser.OK_MSG + '\\n') and not result.startswith(Parser.OK_MSG):\n result += self._con.receive()\n return result", "def receive_next_update(self) -> telegram.Update:\n # Pop data from the queue\n data = 
\"\"\n try:\n data = self.queue.get(timeout=self.cfg.telegram[\"conversation_timeout\"])\n except queuem.Empty:\n # If the conversation times out, gracefully stop the thread\n self.__graceful_stop(StopSignal(\"timeout\"))\n # Check if the data is a stop signal instance\n if isinstance(data, StopSignal):\n # Gracefully stop the process\n log.debug(\"Waiting for a specific message...\")\n self.__graceful_stop(data)\n # Return the received update\n return data", "def _get_message(self):\n if not self.opened: return None\n read = 0\n try:\n \n # read as much as possible\n chars = [b'0']\n logger.debug(\"%s: Serial port read started...\" % \\\n self.__class__.__name__)\n while(len(chars) > 0):\n chars = self._serial_port.read(256)\n if len(chars) > 0:\n try:\n getattr(serial,\"serial_for_url\")\n if sys.version_info[0] > 2:\n self._rbuff += chars\n else:\n self._rbuff += map(ord,chars)\n except AttributeError:\n self._rbuff += map(ord,chars)\n read += len(chars)\n logger.debug(\"%s: ...serial port read complete.\" % \\\n self.__class__.__name__)\n \n # Read Failed\n except Exception as ex:\n logger.error(\"%s: ...serial port read failed:\\n%s\" % \\\n (self.__class__.__name__,str(ex)))\n raise utils.TransportError \\\n (\"Serial Message get failed!\\n\" + str(ex))\n if read > 0 :\n logger.info(\"%s: Read %d bytes.\" % (self.__class__.__name__, \n read))\n\n # Look for message start (SOH)\n disc = []\n while(len(self._rbuff) > 3 and (\n self._rbuff[0] != messages.HorizonMessage.SOH or\n self._rbuff[1] != 0xFF&(~self._rbuff[2]) or\n self._rbuff[1] == 0)):\n disc.append(self._rbuff.pop(0))\n\n if len(disc) > 0:\n logger.info(\"%s: Discarded %d bytes:\\n%s\" % (\n self.__class__.__name__, len(disc), \n ' '.join(map(utils.hex,disc))))\n\n if len(self._rbuff) < 3:\n return None\n \n length = self._rbuff[1] + 3\n \n # Look for next message start\n for i in range(1,len(self._rbuff)-2):\n if self._rbuff[i] == messages.HorizonMessage.SOH and \\\n self._rbuff[1] == 0xFF&(~(self._rbuff[2])) and \\\n self._rbuff[1] != 0:\n if i < length:\n length = i\n break\n \n # Not all read yet\n if len(self._rbuff) < length:\n return None\n \n # Return Message\n raw = self._rbuff[0:length]\n self._rbuff = self._rbuff[length:]\n logger.info(\"%s: Message of %d bytes found:\\n%s\" % (\n self.__class__.__name__, len(raw), \n ' '.join(map(utils.hex,raw))))\n\n return messages.HorizonMessage(raw = raw, \n payload_type = payloads.HorizonPayload,\n store_error = True)", "def has_an_incomming_message(self):\n return self.pipe_start.poll(1)", "def _wait_reply(self, comm_id, call_id, call_name, timeout, retry=True):\n def reply_received():\n \"\"\"The reply is there!\"\"\"\n return call_id in self._reply_inbox\n if not self.wait_until(reply_received):\n if retry:\n self._wait_reply(comm_id, call_id, call_name, timeout, False)\n return\n raise TimeoutError(\n \"Timeout while waiting for '{}' reply.\".format(\n call_name))", "def next_message(self):\n while self.queue.consuming:\n yield self.queue.channel._consume_message()", "def receive(self):\n logging.debug(\"Receiving message\")\n while self.tcpsocket.bytesAvailable() > 0:\n # while some unread bytes available\n stream = QDataStream(self.tcpsocket)\n stream.setVersion(QDataStream.Qt_5_3)\n\n if not self.isRecieving: # For a new Message get the Message Size\n if self.tcpsocket.bytesAvailable >= 4: # since reading UInt32\n self.messageSize = stream.readUInt32()\n logging.debug(\"Start of new message of size %i\", self.messageSize)\n self.isRecieving = True\n else:\n break\n\n 
else: # For a continued message keep reding until whole message is in buffer\n s = min(self.tcpsocket.bytesAvailable(), self.messageSize - len(self.messageBuffer))\n self.messageBuffer.append(stream.readRawData(s))\n if len(self.messageBuffer) == self.messageSize:\n logging.debug(\"Finished receiving message of size %i\", self.messageSize)\n self._processBuffer()", "def wait(self):\n\t\tself.wait_window(self)\n\t\treturn self.result", "async def read(self) -> Union[dictwrapper, str]:\n while True:\n await self.connect()\n try:\n rx_timeout = self.alive_opts.get('rx_timeout', None)\n reader = self.reader.readuntil(separator=b'\\n')\n self.bresponse = await asyncio.wait_for(reader,\n rx_timeout)\n self.response = polystr(self.bresponse)\n if self.response.startswith(\n \"{\") and self.response.endswith(\"}\\r\\n\"):\n self.unpack(self.response)\n self._oldstyle_shim()\n self.valid |= PACKET_SET\n return self.data\n return self.response\n except asyncio.CancelledError:\n self.close()\n raise\n except Exception as exc: # pylint: disable=W0703\n error = 'timeout' if isinstance(\n exc, asyncio.TimeoutError) else exc\n self.logger.warning(\n f'Failed to get message from GPSD: {error}')\n self.close()\n if self.reconnect:\n # Try again later\n await asyncio.sleep(self.reconnect)\n else:\n raise", "def wait(self):\n self.queue.join()", "async def wait_until_done(self) -> None:\n ...", "def _read_data(self):\n while True:\n try:\n data = yield from asyncio.wait_for(self._socket.recv(), 1)\n except asyncio.TimeoutError:\n continue\n except asyncio.CancelledError:\n break\n except ConnectionClosed:\n break\n\n self._push_packet(data)\n\n self._loop.call_soon(self.close)", "def get_next_message(self):\n newest_available_offset = self.broker.get_last_offset(self.topic_name)\n if self.offset <= newest_available_offset:\n msg = self.broker.read_message(self.topic_name, self.offset)\n self.offset += 1\n return msg", "def wait_for_data(receiver):\n\n while not receiver.available(pipes[1]):\n time.sleep(0.01)", "def msg_ready(self):\n if self._in_queue.qsize() == 0:\n return False\n else:\n return True", "def msg_ready(self):\n if self._in_queue.qsize() == 0:\n return False\n else:\n return True", "def read_msg(self):\n if self.state == 'connected':\n if 0 == len(self.buf):\n self.buf = self.inout.recv(Mtcpfns.TCP_MAX_PACKET)\n if 0 == (self.buf):\n self.state = 'disconnected'\n raise EOFError\n pass\n self.buf, data = Mtcpfns.unpack_msg(self.buf)\n return data\n else:\n raise IOError(\"read_msg called in state: %s.\" % self.state)", "def waitReadable( self, timeoutms=None ):\n if len( self.readbuf ) == 0:\n self.pollOut.poll( timeoutms )", "def receive(self):\n raw_msglen = self.recvall(4)\n if not raw_msglen:\n return None\n msglen = stc.unpack('>I', raw_msglen)[0]\n # Read the message data\n return self.recvall(msglen)", "async def wait_until_ready(self) -> None:\n await self._ready.wait()", "def _wait_for_results(self) -> RemoteCallableResult:\n if (\n self.subscriber is None or\n self.started is None or\n self.process is None\n ):\n raise dbt.exceptions.InternalException(\n '_wait_for_results() called before handle()'\n )\n\n try:\n msg = self.subscriber.dispatch_until_exit(\n started=self.started,\n timeout=self.timeout,\n )\n except dbt.exceptions.Exception as exc:\n raise dbt_error(exc)\n except Exception as exc:\n raise server_error(exc)\n if isinstance(msg, QueueErrorMessage):\n raise RPCException.from_error(msg.error)\n elif isinstance(msg, QueueTimeoutMessage):\n if not self._single_threaded:\n 
self.process.terminate()\n raise timeout_error(self.timeout)\n elif isinstance(msg, QueueResultMessage):\n return msg.result\n else:\n raise dbt.exceptions.InternalException(\n 'Invalid message type {} (result={})'.format(msg)\n )", "async def wait_until_ready(self):\n await self._ready.wait()", "def async_read(self):\n self.lock.acquire()\n\n # append data\n self.rx_buffer += self.interface.read()\n\n # ensure first byte start with 0xbc\n if len(self.rx_buffer) > 0:\n if self.rx_buffer[0] != 0xbc:\n try:\n pkt_start = self.rx_buffer.index(0xbc)\n self.rx_buffer = self.rx_buffer[pkt_start:]\n except ValueError:\n self.rx_buffer = bytes()\n\n # check if we got a valid packet\n if len(self.rx_buffer) >= 4:\n pkt_size = unpack('<H', self.rx_buffer[2:4])[0]\n # check if we got a complete packet\n if len(self.rx_buffer) >= (pkt_size + 5):\n # yep, parse this packet\n packet = Packet.fromBytes(self.rx_buffer[:pkt_size+5])\n self.rx_buffer = self.rx_buffer[pkt_size+5:]\n self.lock.release()\n return packet\n\n # otherwise, return None\n self.lock.release()\n return None", "def ready(self):\n return self._channel.recv_ready()", "async def _monitor_recv(self):\n\n while True:\n await RisingEdge(self.clock)\n await ReadOnly()\n if self.bus.valid.value:\n self._recv(int(self.bus.data.value))" ]
[ "0.78191537", "0.7661847", "0.7064476", "0.69554555", "0.69036627", "0.68705696", "0.6781577", "0.6720875", "0.66924024", "0.66924024", "0.66751784", "0.6674056", "0.6660193", "0.6631681", "0.66040105", "0.6603838", "0.6585064", "0.65825886", "0.65714115", "0.6550374", "0.6524112", "0.6477462", "0.6464591", "0.6434783", "0.64342827", "0.63905", "0.6386223", "0.6373556", "0.6369207", "0.6359689", "0.63419425", "0.6338592", "0.63345957", "0.6304108", "0.62870765", "0.6266645", "0.6228485", "0.61960477", "0.61890256", "0.6183192", "0.61701965", "0.6159611", "0.6158575", "0.61313707", "0.6118563", "0.6097381", "0.6081803", "0.6075676", "0.60735285", "0.6070571", "0.6065937", "0.6060852", "0.60562074", "0.60560876", "0.60538256", "0.6051702", "0.6046839", "0.6044324", "0.60436547", "0.6041398", "0.6040001", "0.60384244", "0.6034161", "0.6034161", "0.6033301", "0.6021621", "0.6021023", "0.60138404", "0.6009341", "0.60048145", "0.59957325", "0.5995066", "0.59949875", "0.5987562", "0.5986974", "0.5954561", "0.59424925", "0.5910047", "0.59046733", "0.5903079", "0.5897702", "0.58974653", "0.58966756", "0.5894391", "0.5892713", "0.5891131", "0.5890311", "0.587839", "0.5877819", "0.587171", "0.587171", "0.5870428", "0.5869554", "0.5868899", "0.5868186", "0.58589566", "0.5856575", "0.5851782", "0.5849019", "0.5844697" ]
0.60150254
67
Unsubscribe from message queue and destroy it. Do not call if you want persistent queues or if you access one queue from multiple processes.
def unsubscribe(self):
    # Unsubscribe
    self.pyrps.redis.srem(self.pyrps._ns_subscriptions(self.queue), self.consumer_id)
    # Remove message queue
    self.pyrps.redis.delete(self.pyrps._ns_queue(self.queue, self.consumer_id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def destroy_queue(self):\n response = self.queue.delete()\n if self._is_error_call(response):\n raise RuntimeError('SQS could not delete queue: %s' % response)\n self.queue, self.queue_name = None, None", "def __clear_message_queue(self):\r\n self.__lib.CC_ClearMessageQueue(self.__serno)", "def unsubscribe(self):\r\n self._unregister()", "def remove_queue(self, queue) -> None:\r\n self.receive_queues.remove(queue)", "async def unsubscribe(self):\n LOGGER.info('Subscription removed')\n await self._ros.send(self._unsubscribe_msg)", "def delete_queue(self, queue_name):\n amqp_session = self.__broker.getAmqpSession()\n amqp_session.queue_delete(queue_name)", "def __del__(self):\n self.unsubscribe()", "def del_queue(self, queue_id):\n del self.queue_dict[queue_id]", "def drop_message(self):\n heapq.heappop(self._message_queue)", "def remove_queue(self, queue):\n with self.mutex:\n self.queues.remove(queue)", "def delete_queue(self):\n self.work_queue_client.delete_queue()", "def unsubscribe(self):\n pass # pragma: no cover", "def unlisten(self, prefix: str) -> None:\n assert len(prefix) == 1\n del self.queues[prefix]\n self.logger.info(\"No longer polling for message type: %s\", prefix)", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def _cleanup_method(self, queue_name, ep=None):\n if ep._chan is not None and not ep._chan._queue_auto_delete:\n # only need to delete if AMQP didn't handle it for us already!\n # @TODO this will not work with XOs (future)\n try:\n ch = self.container.node.channel(RecvChannel)\n ch._recv_name = NameTrio(get_sys_name(), \"%s.%s\" % (get_sys_name(), queue_name))\n ch._destroy_queue()\n except TransportError as ex:\n log.warn(\"Cleanup method triggered an error, ignoring: %s\", ex)", "def _queue_delete(self, queue):\n\n queue.delete()", "def message_delete(self):\r\n SlTrace.lg(\"Destroying timed message\", \"message\")\r\n if self.cur_message is not None:\r\n SlTrace.lg(\"Found message to destroy\", \"message\")\r\n self.cur_message.destroy()\r\n self.cur_message = None", "def purge_queue(client, queue):\n channel = client.channel()\n\n channel.queue_declare(queue=queue, durable=True, auto_delete=False)\n channel.queue_purge(queue)\n channel.close()", "def on_close(self):\n self.subscrib.unsubscribe(self.channel)\n self.thread.stop()", "def purge_mailbox(self):\n self._mailbox.clear()", "def unsubscribe(self):\n if self._subscribed and self._connected:\n try:\n msg = self._create_message(strings.UNSUB_MSG)\n self.write(msg)\n except (OSError, KeyError) as ex:\n _LOGGER.error(\n \"PyISY encountered a socket error while writing unsubscribe message to the socket: %s.\",\n ex,\n )\n self._subscribed = False\n self.disconnect()", "def stop_messenger(self):\n if self.connected:\n self.messenger.stop()\n self.connected = False", "def stop(self):\n self.running = False\n with self.lock:\n self.websockets.clear()\n self.poller.release()", "def purge(self):\n self._rpc(specification.Queue.Purge())", "def unregister(self):\n self._executor.unregister_publisher(self)", "def delete_queue(client, vhost, queue):\n client.delete_queue(vhost, queue)", "def clear_queue(self):\n\t\t\tself.message_queue.clear()\n\t\t\treturn self.message_queue", "async def unsubscribe(self, topic: str, subscription_id: int = None) -> None:\n ...", "def delete_sqs_message(sqs_queue_url, msg_receipt_handle):\r\n\r\n # Delete the message from the SQS queue\r\n sqs_client = 
boto3.client('sqs',region_name=\"us-east-1\")\r\n sqs_client.delete_message(QueueUrl=sqs_queue_url,\r\n ReceiptHandle=msg_receipt_handle)", "def cli(env, account_id, queue_name, message_id, force, datacenter, network):\n\n manager = SoftLayer.MessagingManager(env.client)\n mq_client = manager.get_connection(account_id,\n datacenter=datacenter, network=network)\n\n if message_id:\n mq_client.delete_message(queue_name, message_id)\n else:\n mq_client.delete_queue(queue_name, force)", "def cleanup(self):\n self.msgmap.clear()\n self.droppedmsgs.clear()\n self.chan.stop_receiving_messages()\n\n # TODO: enable\n #self.cmdMap.clear()\n #self.cmdCliSubmitQueue.clear()\n #self.cmdSvrComputeQueue.clear()\n #self.droppedCommands.clear()\n #self.ch.stop_receiving_commands()", "def stop(self):\n if self.running:\n log.info('Stopping sub process (pid {}).'.format(self.sub_process.pid))\n self.sub_process.terminate()\n self.sub_process.join()\n log.info('Stopped sub process (pid {}).'.format(self.sub_process.pid))\n self.daemon.cancel()\n log.info('Cancelled polling daemon for sub process {}.'.format(self.sub_process.pid))\n\n # Cleanup the stream\n log.info('Cleaning sub-process (pid {}).'.format(self.sub_process.pid))\n self.mp_queue.close()\n self.mp_queue = None\n self.sub_process = None\n self.daemon = None", "def deregister(self, queue, project=None):\n self._catalogue_ctrl.delete(project, queue)", "def desubscribe(self, subscription):\n try:\n if isinstance(subscription, Subscription):\n sub = DeSubscribe(subscription, self.__pool, self.myAddress)\n self.send(self.__pool, sub)\n except Exception:\n handle_actor_system_fail()", "def delete_queue(queue_name: str, server_url: Optional[str] = None):\n rpc = RemoteProcedure(handle_QMF2_exception,\n 'qmf.default.direct', server_url)\n delete_queue_message = create_QMF2_method_invoke(\n get_broker_id(server_url),\n 'delete', {\n 'type': 'queue',\n 'name': queue_name\n }\n )\n rpc.call(delete_queue_message, timedelta(seconds=5))", "def __del__(self):\n\t\trospy.logdebug('MAVROSListener destruction')\n\t\t\n\t\tfor sub in self.__subs.values():\n\t\t\tsub.unregister()", "def delete_queue(self, queue_name: str) -> None:\n if queue_name is None:\n raise TypeError(\"Queue name cannot be None.\")\n\n with self.get_conn() as service_mgmt_conn:\n service_mgmt_conn.delete_queue(queue_name)", "async def cleanup(\n self, voice_client: Optional[discord.VoiceClient], guild: discord.Guild\n ):\n\n if voice_client:\n try:\n await voice_client.disconnect(force=True)\n except ValueError:\n # Raised from wavelink\n pass\n\n if guild.id in self.queue:\n queue = self.queue.pop(guild.id)\n queue.cleanup()\n del queue", "def unsubscribe(self, chanel_name):\n name = 'unsubscribe'\n\n self._send_websocket_request(name, chanel_name)", "def unsubscribe(cls,sender,receiver):\n cls._unsubscribe(id(sender),receiver)", "def shutdown(self):\n self.log.info(\"Purging event queue...\")\n self.kill_all()\n self.log.info(\"done\")\n self.log.info(\"Exiting...\")\n sys.exit()", "def deQueue(self):\n\t\tif self.isEmpty():\n\t\t\tprint(\"Queue already empty: Queue Empty\")\n\t\t\texit(1)\n\t\tprint(\"Dequeueing: \", self.queue[self.front])\n\t\tself.queue[self.front] = None\n\t\tself.front = self.front + 1\n\t\tself.size = self.size - 1", "def deregister(self, queue, project=None):\n self._invalidate_cached_id(queue, project)\n self._catalogue_ctrl.delete(project, queue)", "def _unsubscribe(self, signal):\n while signal in self._downstream:\n self._downstream.remove(signal)\n while signal in 
self._downstream_reconnect:\n self._downstream_reconnect.remove(signal)", "def stop(self):\n\t\tif self.__logging: self.__logger.debug('Terminating processes.')\n\t\t#terminate Threaded queue mode seperately\n\t\tif self.__threaded_queue_mode and not(self.__queue is None):\n\t\t\tif len(self.__queue)>0: self.__queue.clear()\n\t\t\tself.__threaded_queue_mode = False\n\t\t\tself.frame = None\n\n\t\t# indicate that the thread should be terminate\n\t\tself.__terminate = True\n\n\t\t# wait until stream resources are released (producer thread might be still grabbing frame)\n\t\tif self.__thread is not None:\n\t\t\tself.__thread.join()\n\t\t\t#properly handle thread exit\n\t\t\tif self.__youtube_mode:\n\t\t\t\t# kill thread-lock in youtube mode\n\t\t\t\tself.__thread = None", "def __del__(self):\n self._proc.kill()", "def stopzmq(self):\n\n self.context.destroy()", "def unsubscribe(self, id):\n self._signal_pool_uids.pop(id)\n self._signal_pool.unsubscribe(id)", "def __del__(self):\n\n if self._needs_release:\n send_message(self, \"release\", restype=objc_id, argtypes=[])", "def destroy(self):\r\n\t\tsuper().destroy()\r\n\t\tif comm_server.is_running():\r\n\t\t\tcomm_server.remove_disconnection_handler(self._update_connection_num)\r\n\t\t\tcomm_server.stop_server()", "def __del__(self):\n\n # Disconnect from MQTT\n if self.__mqtt_connection:\n try:\n self.__mqtt_connection.disconnect()\n except:\n pass\n\n # Disconnect from all Bluetooth devices\n for con in self.__bluetooth_connections.values():\n try:\n con.disconnect()\n except:\n pass\n\n # Stop Bluetooth service\n if self.__bluetooth_adapter:\n self.__bluetooth_adapter.stop()", "def stop(self):\n self.logger.info(\"Stopping messenger.\")\n self.running = False", "def _async_unsubscribe(self, topic: str) -> None:\n if self._is_active_subscription(topic):\n if self._max_qos[topic] == 0:\n return\n subs = self._matching_subscriptions(topic)\n self._max_qos[topic] = max(sub.qos for sub in subs)\n # Other subscriptions on topic remaining - don't unsubscribe.\n return\n if topic in self._max_qos:\n del self._max_qos[topic]\n if topic in self._pending_subscriptions:\n # Avoid any pending subscription to be executed\n del self._pending_subscriptions[topic]\n\n self._pending_unsubscribes.add(topic)\n self._unsubscribe_debouncer.async_schedule()", "def unregister(self):\n assert self.state == State.SHUTDOWN\n del self._proto[self.dest_addr]", "def destroy(self):\n\n raise imap4.MailboxException(\"Permission denied.\")", "def destroy(self):\n del Stream._streams[self.__id]\n self.__network._send(_Messages.stream_destroy, self.__id, False)", "async def remove(self):\n\n await self.VoiceClient.http.removeQueueSource(self.tag)\n\n return self", "def clear_messages(self):\n self.redis_client.delete(self.message_list)", "def Destroy(self):\n self.Disconnected()\n self._io_loop.remove_handler(self._fd)\n os.close(self._fd)\n self._gadget = None\n self._fd = None", "def stop(self):\r\n self._incompleteMsgs = set()\r\n\r\n if self._cleaner.running:\r\n self._cleaner.stop()", "async def unsubscribe_topics(self) -> None:\n self._sub_state = await self._mqtt_client.unsubscribe(self._sub_state)", "def unsubscribe(endpoint: str, topic: str, timeout: int = 5):\n global logger\n logger.info(f\"Unsubscribing from topic '{topic}' ...\")\n action = {\"action\": \"unsubscribe\", \"topic\": topic}\n reply = send_manage_message(endpoint, action, timeout)\n if not reply_is_success(reply):\n logger.warning(\"Unsubscription failed\")\n return\n logger.info(\"Unsubscription 
successful\")", "def destroy(self):\r\n self.__destroy()", "def tearDownClass(cls):\n cls.producer.channel.queue_purge(queue='files_to_database')\n cls.producer.channel.close()", "def purge_queue(queue_name: str,\n limit: int = 0,\n message_filter: Optional[Tuple[str, str]] = None,\n server_url: Optional[str] = None):\n queue = get_object('org.apache.qpid.broker', 'queue', queue_name,\n server_url)\n method_arguments = {'request': limit} # type: dict\n if message_filter:\n method_arguments['filter'] = _build_message_filter(*message_filter)\n\n rpc = RemoteProcedure(handle_QMF2_exception,\n 'qmf.default.direct', server_url)\n rpc.call(create_QMF2_method_invoke(queue['_object_id'],\n 'purge', method_arguments),\n timedelta(seconds=5))", "def remote_destroy(self):\r\n if self._receivers:\r\n for interface in reduce(set.union, self._receivers.itervalues()):\r\n interface.unregisterProtocol(self)\r\n\r\n self._receivers = None\r\n\r\n if self._endpoint:\r\n self._endpoint.unregisterProtocol(self)\r\n self._endpoint = None", "def destroy(self):\r\n self._protocol.destroy()\r\n\r\n self._protocol = None", "def terminate(self):\n self.mailQueue.put(None)\n self.mailQueue.join()\n self.join()\n self.logger.info(\"Mailer terminated\")", "def cleanup(self):\n # Removing the ROS system wide advert about which topic are interfaced with this process\n # TODO : lock this for concurrent access\n if_topics = rospy.get_param('~' + TopicBack.IF_TOPIC_PARAM, [])\n if_topics.remove(self.fullname)\n rospy.set_param('~' + TopicBack.IF_TOPIC_PARAM, if_topics)\n\n # cleanup pub and sub, so we can go through another create / remove cycle properly\n self._remove_pub(self.pub)\n self._remove_sub(self.sub)", "async def _send_message_for_unsubscribe(self, data_id):\n body = json.dumps(\n dict(\n action='unsub',\n data_id=data_id\n )\n ).encode('utf-8')\n await self._send_message_in_queue(self.queue_crypto_quotes_service, body)", "def unsubscribe(self, subject):\n pass", "def deregister(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Implement this!\n pass", "def UnsubscribeFromRedeemingQueueUpdatedEvent(self, callBackFunction):\n self.redeemingQueueUpdated.disconnect(callBackFunction)", "def unsubscribe(self, sub_id):\n self.send({'msg': 'unsub', 'id': sub_id})", "def unsubscribe(self, sub_id):\n self.send({'msg': 'unsub', 'id': sub_id})", "def unsubscribe(self, destination, extra_headers=None):\n unsubscribe = frame.UnsubscribeFrame(destination, extra_headers=extra_headers)\n res = self.send_frame(unsubscribe)\n with self.subscription_lock:\n self.subscribed_destinations.pop(destination)\n return res", "def UnregisterMessageHandler(self, timeout=None):\n if self.handler_thread:\n self.handler_stop = True\n self.handler_thread.join(timeout)\n if self.handler_thread.is_alive():\n raise RuntimeError(\"Message handler thread did not join in time.\")\n self.handler_thread = None", "def exit_queue(self, name=None):\r\n if(name):\r\n self.log.debug(\"EXITING queue: (%s)\" % (name))\r\n self._queues[name].release()\r\n self.log.debug(\"SUCCESS EXITING queue: (%s)\" % (name))", "async def job_remove(self, uid):\n self._require_running()\n job = self._get_job(uid)\n await job.close()\n del self._jobs[uid]\n del self._jobs_by_connection[job.sender.connection][uid]\n if len(self._jobs_by_connection[job.sender.connection]) == 0:\n del self._jobs_by_connection[job.sender.connection]\n self._log.debug('Removed job %s', job)", "def destroy_event(self, event_type):\n for func in self.event_subscribers[event_type][:]:\n 
self.unsubscribe(event_type, func)\n self.unregister_event_types(event_type)", "def unsubscribe(self, tag):\n self.socket.setsockopt(constants.UNSUBSCRIBE, tag)", "def queue_delete(queue):\n\n for job in queue.jobs:\n job_delete(job)\n if os.path.exists(queue.data_abspath):\n os.rmdir(queue.data_abspath)\n db.session.delete(queue)\n db.session.commit()", "def delete_message(self, receipt_handle):\n try:\n self.sqs_client.delete_message(\n QueueUrl=self.sqs_queue,\n ReceiptHandle=receipt_handle,\n )\n except Exception as e:\n logging.info(\n f\"failed to delete msg with handle '{receipt_handle}' \"\n f\"error: {e}\"\n )", "async def async_will_remove_from_hass(self) -> None:\n async_unsubscribe_topics(self.hass, self._sub_state)\n self._sub_state = None", "def __del__(self):\n if self.running:\n self.stop()", "def unsubscribe(self, event_handler):\n pass # pragma: no cover", "def __del__(self):\n self.destroy()", "def __del__(self):\n try:\n pybullet.disconnect(physicsClientId=self._client)\n except pybullet.error:\n pass", "def unsubscribe(self, topic):\n request = protos.RequestUnsubscribe(topic=topic)\n return self.stub.unsubscribe(request)", "def clean_queue(self):\n self._stdin_queue.put_nowait(None) # Release thread", "def clearQueue(self, queue_name, project_id=None):\n if project_id is None:\n project_id = self.project_id\n\n url = \"%sprojects/%s/queues/%s/clear?oauth=%s\" % (self.url, project_id, queue_name, self.token)\n body = self.__post(url)\n return json.loads(body)", "def stop(self):\n if self._is_running():\n self._stop_event.set()\n\n for process in self._processes:\n if process.is_alive():\n os.kill(process.pid, signal.SIGINT)\n process.join()\n\n if self._queue is not None:\n self._queue.close()\n\n self._queue = None\n self._stop_event = None\n self._processes = []", "def stop(self):\n if self._is_running():\n self._stop_event.set()\n\n for process in self._processes:\n if process.is_alive():\n os.kill(process.pid, signal.SIGINT)\n process.join()\n\n if self._queue is not None:\n self._queue.close()\n\n self._queue = None\n self._stop_event = None\n self._processes = []", "def clear(self):\r\n try:\r\n while not self._queue.empty():\r\n self._queue.get().close()\r\n except:\r\n pass", "def unregisterProducer():", "def stop(self):\n with self._state_change:\n if not self._running:\n return\n\n self._running = False\n\n for queue in (self.callback_queue,):\n queue.put(_STOP)\n\n while self._workers:\n worker = self._workers.pop()\n worker.join()\n\n # Clear the queues\n self.callback_queue = self.queue_impl() # pragma: nocover\n\n python2atexit.unregister(self.stop)" ]
[ "0.7262633", "0.6581309", "0.6575992", "0.6442243", "0.6403433", "0.63751954", "0.63708746", "0.63472867", "0.6332669", "0.6327779", "0.6296495", "0.6264714", "0.6237197", "0.61690885", "0.61690885", "0.61690885", "0.61690885", "0.61690885", "0.61083573", "0.6106147", "0.6098599", "0.6095181", "0.6090346", "0.6051243", "0.6048008", "0.5994001", "0.5989376", "0.59489375", "0.5945131", "0.5913578", "0.58995616", "0.58988", "0.58899623", "0.5858336", "0.58526963", "0.5850172", "0.58445793", "0.5838402", "0.58285373", "0.5802586", "0.5776407", "0.57693976", "0.57605004", "0.57552725", "0.575253", "0.5738076", "0.57302624", "0.57242835", "0.5720559", "0.5720533", "0.5706047", "0.5695065", "0.5688868", "0.56855816", "0.56792337", "0.56542057", "0.5648937", "0.5643742", "0.5643366", "0.56417936", "0.5638078", "0.5630944", "0.5609185", "0.5605381", "0.5604132", "0.56014067", "0.55973226", "0.5588337", "0.55823475", "0.55818033", "0.5576341", "0.5562819", "0.5559097", "0.5549355", "0.55491084", "0.5544239", "0.55355453", "0.5533292", "0.5533292", "0.55297834", "0.55260533", "0.5523933", "0.5512941", "0.5507158", "0.5505386", "0.5503657", "0.5498014", "0.5489303", "0.54882795", "0.5473348", "0.54716396", "0.5469291", "0.54680645", "0.54588884", "0.54577047", "0.5454588", "0.5454588", "0.54493564", "0.54409695", "0.5439424" ]
0.8235168
0
Setup shapes and sprites (if we had any) and initialise the game class
def setup(self): # Create your sprites and sprite lists here self.game: Game = Game(SCREEN_WIDTH, SCREEN_HEIGHT, TILE_SIZE, 1, grid_layers = 4) self.game.game_message = "Lead the Rabbit home" # show the menu so that we see the instructions self.game.menu.button_list[0].text = "Start" self.game.menu.is_visible = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup(self):\n\n # Create the Sprite lists\n self.sprite_list = arcade.SpriteList()\n\n r = 60\n for x in rand_range(0, 100 * math.pi, scale=math.pi / 5):\n star = arcade.Sprite(\"../../resources/arcade/gold_1.png\")\n star.center_x = SCREEN_WIDTH / 2 + r * math.cos(x)\n star.center_y = SCREEN_HEIGHT / 2 + r * math.sin(x)\n star.seed = scale_generator(x=random() * math.pi, offset=.5, step=.01)\n star.scale = next(star.seed)\n self.sprite_list.append(star)\n r += 3", "def setUp(self):\r\n pos =[0,0]\r\n vel = [0,0]\r\n ang = 0.0\r\n ang_vel= [0,0]\r\n image = None\r\n\r\n center = [1,1]\r\n size = 1\r\n info = ImageInfo(center, size)\r\n\r\n self.sprite = Sprite(pos, vel, ang, ang_vel, image, info)", "def setUp(self):\r\n pos =[0,0]\r\n vel = [0,0]\r\n ang = 0.0\r\n ang_vel= [0,0]\r\n image = None\r\n\r\n center = [1,1]\r\n size = 1\r\n info = ImageInfo(center, size)\r\n\r\n self.sprite = Sprite(pos, vel, ang, ang_vel, image, info)", "def setup(self):\n\n # Sprite lists\n self.player_list = arcade.SpriteList()\n self.wall_list = arcade.SpriteList(use_spatial_hash=True,\n spatial_hash_cell_size=128)\n self.enemy_list = arcade.SpriteList()\n\n # Set up the player\n resource = \":resources:images/animated_characters/\" \\\n \"female_person/femalePerson_idle.png\"\n self.player = arcade.Sprite(resource, scale=SPRITE_SCALING)\n self.player.center_x = SPRITE_SIZE * 5\n self.player.center_y = SPRITE_SIZE * 1\n self.player_list.append(self.player)\n\n # Set enemies\n resource = \":resources:images/animated_characters/zombie/zombie_idle.png\"\n enemy = arcade.Sprite(resource, scale=SPRITE_SCALING)\n enemy.center_x = SPRITE_SIZE * 4\n enemy.center_y = SPRITE_SIZE * 7\n self.enemy_list.append(enemy)\n\n spacing = SPRITE_SIZE * 3\n for column in range(10):\n for row in range(15):\n sprite = arcade.Sprite(\":resources:images/tiles/grassCenter.png\",\n scale=SPRITE_SCALING)\n\n x = (column + 1) * spacing\n y = (row + 1) * sprite.height\n\n sprite.center_x = x\n sprite.center_y = y\n if random.randrange(100) > 30:\n self.wall_list.append(sprite)\n\n self.physics_engine = arcade.PhysicsEngineSimple(self.player,\n self.wall_list)\n\n # --- Path related\n # This variable holds the travel-path. We keep it as an attribute so\n # we can calculate it in on_update, and draw it in on_draw.\n self.path = None\n # Grid size for calculations. The smaller the grid, the longer the time\n # for calculations. Make sure the grid aligns with the sprite wall grid,\n # or some openings might be missed.\n grid_size = SPRITE_SIZE\n\n # Calculate the playing field size. We can't generate paths outside of\n # this.\n playing_field_left_boundary = -SPRITE_SIZE * 2\n playing_field_right_boundary = SPRITE_SIZE * 35\n playing_field_top_boundary = SPRITE_SIZE * 17\n playing_field_bottom_boundary = -SPRITE_SIZE * 2\n\n # This calculates a list of barriers. By calculating it here in the\n # init, we are assuming this list does not change. In this example,\n # our walls don't move, so that is ok. If we want moving barriers (such as\n # moving platforms or enemies) we need to recalculate. This can be an\n # time-intensive process depending on the playing field size and grid\n # resolution.\n\n # Note: If the enemy sprites are the same size, we only need to calculate\n # one of these. We do NOT need a different one for each enemy. 
The sprite\n # is just used for a size calculation.\n self.barrier_list = arcade.AStarBarrierList(enemy,\n self.wall_list,\n grid_size,\n playing_field_left_boundary,\n playing_field_right_boundary,\n playing_field_bottom_boundary,\n playing_field_top_boundary)", "def setup_scene(self):\n\n # read map\n options, landscapes, statics, dynamics, trees, hero, hare = read_map('test.map')\n self.num_of_blocks_X, self.num_of_blocks_Y = options['size']\n with self.canvas:\n # init landscapes\n block_x = 0\n for i in xrange(self.num_of_blocks_X):\n block_y = 0\n for j in xrange(self.num_of_blocks_Y):\n class_name = landscapes[i][j]\n if class_name is not None:\n clazz = eval(class_name.capitalize())\n else:\n clazz = Grass\n block = clazz(pos=(block_x, block_y),\n size=(self.block_width, self.block_height), border=(0, 0))\n self.blocks[i][j] = block\n block_y += self.block_height \n block_x += self.block_width\n\n # init dynamics\n for x, y, class_name in dynamics:\n if 'dynamics_as_blocks' in options and options['dynamics_as_blocks']:\n x, y = (x + 0.5) * self.block_width, (y + 0.5) * self.block_height\n eval(class_name.capitalize())(x, y)\n \n with self.canvas:\n # draw or hero\n HeroRabbit(BLOCK_SIZE[0]*(hero[0] + 0.5), BLOCK_SIZE[1]*(hero[1] + 0.5))\n Hare(BLOCK_SIZE[0]*(hare[0] + 0.5), BLOCK_SIZE[1]*(hare[1] + 0.5))\n\n # init statics\n def _is_mountain(i, j):\n return int(0 <= i < self.num_of_blocks_X and 0 <= j <= self.num_of_blocks_Y and\n statics[i][j] == 'mountain')\n\n def _get_mountain_type(i, j):\n opensides = (_is_mountain(i - 1, j), _is_mountain(i, j + 1),\n _is_mountain(i + 1, j), _is_mountain(i, j - 1)) # left, top, right, bottom\n opensides_to_type = {\n (1, 1, 1, 1): 'center',\n (1, 0, 1, 0): 'horizontal_center',\n (0, 1, 0, 1): 'vertical_center',\n (1, 0, 0, 0): 'horizontal_right',\n (0, 1, 0, 0): 'vertical_bottom',\n (0, 0, 1, 0): 'horizontal_left',\n (0, 0, 0, 1): 'vertical_top',\n }\n return opensides_to_type.get(opensides, 'horizontal_center')\n \n _mountains = []\n _bushes= []\n \n for i in xrange(self.num_of_blocks_X):\n for j in xrange(self.num_of_blocks_Y):\n class_name = statics[i][j]\n if class_name is not None:\n pos = (i + 0.5) * self.block_width, (j + 0.5) * self.block_height\n if class_name == 'bush':\n #Bush(*pos)\n _bushes.append(pos)\n elif class_name == 'mountain':\n _mountains.append((pos, _get_mountain_type(i, j)))\n #Mountain(*pos, type=_get_mountain_type(i, j))\n \n for tree_pos in trees:\n Tree(BLOCK_SIZE[0]*(tree_pos[0] + 0.5), BLOCK_SIZE[1]*(tree_pos[1] + 0.5))\n \n with self.canvas:\n for pos in _bushes:\n Bush(*pos)\n \n for pos, type in _mountains:\n Mountain(*pos, type=type)\n\n HolyCarrot(13.5*self.block_width, 7.5*self.block_height)\n # This should be called at the end\n self.reindex_graphics()", "def __init__ (self, game):\r\n\r\n pygame.sprite.Sprite.__init__(self)\r\n self.game_ref = game\r\n self.initialise()", "def setup(self):\n \n self.explosions_list = arcade.SpriteList()\n\n self.explosion_texture_list = []\n \n # Set up the score\n self.score = 0\n self.countdown = 1000\n\n for i in range(EXPLOSION_TEXTURE_COUNT):\n # Files from http://www.explosiongenerator.com are numbered sequentially.\n # This code loads all of the explosion0000.png to explosion0270.png files\n # that are part of this explosion.\n texture_name = f\"images/explosion/explosion{i:04d}.png\"\n\n self.explosion_texture_list.append(arcade.load_texture(texture_name))\n \n # create 10 balls\n for i in range(10):\n myball = make_ball()\n self.ball_list.append(myball)", "def 
setup(self):\n\n self.characters = arcade.SpriteList()\n self.dungeon_sprites = arcade.SpriteList(\n use_spatial_hash=True, spatial_hash_cell_size=16\n )\n\n self.player = Item(ord(\"@\"), arcade.csscolor.WHITE)\n self.player.x = 0\n self.player.y = 0\n self.characters.append(self.player)\n\n # Size of the map\n map_width = MAP_WIDTH\n map_height = MAP_HEIGHT\n\n # Some variables for the rooms in the map\n room_max_size = 10\n room_min_size = 6\n max_rooms = 30\n\n self.game_map = GameMap(map_width, map_height)\n self.game_map.make_map(\n max_rooms, room_min_size, room_max_size, map_width, map_height, self.player\n )\n\n # Draw all the tiles in the game map\n for y in range(self.game_map.height):\n for x in range(self.game_map.width):\n wall = self.game_map.tiles[x][y].block_sight\n sprite = Item(WALL_CHAR, arcade.csscolor.BLACK)\n if wall:\n sprite.block_sight = True\n else:\n sprite.block_sight = False\n\n sprite.x = x\n sprite.y = y\n\n self.dungeon_sprites.append(sprite)\n\n recalculate_fov(\n self.player.x, self.player.y, FOV_RADIUS, self.dungeon_sprites\n )", "def setup(self):\n # Create your sprites and sprite lists here\n self.wall_list = arcade.SpriteList()\n for x in range(128, SCREEN_WIDTH, 196):\n for y in range(128, SCREEN_HEIGHT, 196):\n wall = arcade.Sprite(\"building.png\",.3)\n wall.center_x = x\n wall.center_y = y\n # wall.angle = 45\n self.wall_list.append(wall)\n self.player_sprite = arcade.Sprite(\"taxi.png\")\n self.player_sprite.center_x = 50\n self.player_sprite.center_y = 50\n self.player_sprite.scale = .2\n self.player_list = arcade.SpriteList()\n self.player_list.append(self.player_sprite)\n\n #Spawns people and makes list\n self.person = arcade.Sprite(\"person.png\")\n self.person.scale = .2\n self.person.center_x = random.randrange(SCREEN_WIDTH)\n self.person.center_y = random.randrange(SCREEN_HEIGHT)\n #Spawns target\n self.target = arcade.Sprite(\"target.png\")\n self.target.scale = .5\n self.target.center_x = random.randrange(60,SCREEN_WIDTH)\n self.target.center_y = random.randrange(60,SCREEN_HEIGHT)\n color_list = [\"BLUE\",\"RED\"]", "def setup(self):\n\n # Sprite lists\n self.all_sprite_list = arcade.SpriteList()\n self.player_list = arcade.SpriteList()\n self.coin_list = arcade.SpriteList()\n self.deathclaw_list = arcade.SpriteList()\n # Score\n self.score = 0\n\n # Set up the player\n # Character image from https://www.pngkit.com/\n self.player_sprite = arcade.Sprite(\"Vault_Boy.png\", SPRITE_SCALING_PLAYER)\n self.player_sprite.center_x = 50\n self.player_sprite.center_y = 50\n self.player_list.append(self.player_sprite)\n\n # Create the ENEMY\n for i in range(DEATHCLAW_COUNT):\n\n # Create the ENEMY instance\n # ENEMY image from https://fallout.fandom.com/\n deathclaw = Deathclaw(\"Deathclaw.png\", SPRITE_SCALING_DEATHCLAW)\n\n # Position the coin\n deathclaw.center_x = random.randrange(SCREEN_WIDTH)\n deathclaw.center_y = random.randrange(SCREEN_HEIGHT)\n\n # Add the coin to the lists\n self.deathclaw_list.append(deathclaw)\n\n for i in range(COIN_COUNT):\n\n # Create the Nuka Cola instance\n # Nuka Cola image fromh https://www.cleanpng.com/free/nuka-cola.html\n coin = Coin(\"Nuka_Kola.png\", SPRITE_SCALING_COIN)\n\n # Position the coin\n coin.center_x = random.randrange(SCREEN_WIDTH)\n coin.center_y = random.randrange(SCREEN_HEIGHT)\n\n # Add the coin to the lists\n self.coin_list.append(coin)", "def setup(self):\n # Set up the player\n self.player_sprite = arcade.Sprite(\"Sprites/Jugador/Jugador.jpg\", SPRITE_SCALING)\n self.player_sprite.center_x 
= 100\n self.player_sprite.center_y = 100\n self.player_list = arcade.SpriteList()\n self.player_list.append(self.player_sprite)\n\n # Listado de habitaciones\n self.rooms = []\n self.rooms.append(setup_pueblo())\n\n #Contador de habitación\n self.current_room = 0\n\n #Fisicas\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.rooms[self.current_room].wall_list)", "def __init__(self):\n pygame.init()\n self.settings = Settings()\n self.screen = pygame.display.set_mode(\n (self.settings.screen_width, self.settings.screen_height)\n )\n pygame.display.set_caption(\"Sideways Shooter\")\n self.stats = GameStats(self)\n self.sideways_ship = SidewaysShip(self)\n self.bullets = pygame.sprite.Group()\n self.aliens = pygame.sprite.Group()\n self._create_fleet()", "def __init__(self):\n self.wall_list = pygame.sprite.Group()\n self.enemy_sprites = pygame.sprite.Group()", "def __init__(self, player):\n self.wall_list = pygame.sprite.Group()\n self.enemy_list = pygame.sprite.Group()\n self.sludge = pygame.sprite.Group()\n self.consumeable = pygame.sprite.Group()\n self.can_climb = pygame.sprite.Group()\n self.player = player\n self.spore_list = [Decompose_Spore, Ledge_Spore]\n self.active_spore = self.spore_list[0]\n \n # Background image\n self.background = None", "def __init__(self, screen_size, grid_size):\n super(MainScreen, self).__init__(screen_size)\n self.gamegrid = QuadraticGrid(grid_size[0], grid_size[1])\n self.grid_width = grid_size[0]\n self.grid_height = grid_size[1]\n self.block_width = screen_size[0] / grid_size[0]\n self.block_height = screen_size[1] / grid_size[1]\n print str(self.block_width) + \" \" + str(self.block_height)\n \n self.game_model = GameModel(grid_size)\n self.dragon_group = pygame.sprite.Group()\n self.gun_group = pygame.sprite.Group()\n self.hat_group = pygame.sprite.Group()", "def __init__(self):\n pygame.init()\n # Assign surface i.e where game elements can be displayed\n self.settings = Settings()\n # Enable Full screen mode\n self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n self.settings.screen_width = self.screen.get_rect().width\n self.settings.screen_height = self.screen.get_rect().height\n pygame.display.set_caption(\"Oyugo's Space Invasion\")\n self.bg_colour = (self.settings.bg_colour)\n self.stats = GameStats(self)\n self.sb = Scoreboard(self)\n self.ship = Ship(self)\n self.bullets = pygame.sprite.Group()\n self.special_bullet = pygame.sprite.Group()\n self.bomb_status = False\n self.stars = pygame.sprite.Group()\n self._create_galaxy()\n self.play_button = Button(self, \"Play\")", "def setup(self):\n\n # Sprite lists\n self.player_list = arcade.SpriteList()\n\n # https://opengameart.org/content/animated-top-down-survivor-player\n # Set up the player\n self.player_sprite = Player(\"survivor-idle_rifle_0.png\", 0.5)\n self.player_sprite.center_x = SCREEN_WIDTH / 2\n self.player_sprite.center_y = SCREEN_HEIGHT / 2\n self.player_list.append(self.player_sprite)\n self.wall_list = arcade.SpriteList()\n self.chest_list = arcade.SpriteList()\n self.bullet_list = arcade.SpriteList()\n\n self.score = 0\n\n\n # Set up the player\n # https://opengameart.org/content/animated-top-down-survivor-player\n\n\n\n # -- Set up several columns of walls\n for x in range(-700, 1700, 100):\n # sprite form https://opengameart.org/content/bush-png\n wall = arcade.Sprite(\"bush_11.png\", SPRITE_SCALING)\n wall.center_x = x\n wall.center_y = -300\n self.wall_list.append(wall)\n for x in range(-700, 1700, 100):\n # sprite form 
https://opengameart.org/content/bush-png\n wall = arcade.Sprite(\"bush_11.png\", SPRITE_SCALING)\n wall.center_x = x\n wall.center_y = 1025\n self.wall_list.append(wall)\n for y in range(-300, 1025, 100):\n # sprite form https://opengameart.org/content/bush-png\n wall = arcade.Sprite(\"bush_11.png\", SPRITE_SCALING)\n wall.center_x = -700\n wall.center_y = y\n self.wall_list.append(wall)\n for y in range(-300, 1025, 100):\n # sprite form https://opengameart.org/content/bush-png\n wall = arcade.Sprite(\"bush_11.png\", SPRITE_SCALING)\n wall.center_x = 1700\n wall.center_y = y\n self.wall_list.append(wall)\n\n\n# https://www.pinterest.com/pin/258042253625289337\n\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.wall_list)\n\n # Set the background color\n arcade.set_background_color(arcade.color.BRITISH_RACING_GREEN)\n\n # Set the viewport boundaries\n # These numbers set where we have 'scrolled' to.\n self.view_left = 0\n self.view_bottom = 0", "def __init__(self):\n self.monsters_images = pg.sprite.Group()\n self.font_23 = pg.font.Font(prepare.FONTS['Timeless-Bold'], 23)\n self.font_20 = pg.font.Font(prepare.FONTS['Timeless'], 20)\n self.font_18 = pg.font.Font(prepare.FONTS['Timeless'], 18)\n self.bold_font = pg.font.Font(prepare.FONTS['Timeless-Bold'], 17)\n self.font_15 = pg.font.Font(prepare.FONTS['Timeless'], 15)\n\n self.init_left_zone()\n self.init_middle_zone()\n self.init_right_zone()", "def __init__(self):\n self.screen = pg.display.get_surface()\n self.screen_rect = self.screen.get_rect()\n self.joys = initialize_all_gamepads()\n self.done = False\n self.clock = pg.time.Clock()\n self.fps = 60\n self.keys = pg.key.get_pressed()\n self.cannon = Turret(self.joys[0], (250,250))\n self.objects = pg.sprite.Group()", "def game_initialize():\n global SURFACE_MAIN #global vars will be all caps\n # init pygame\n pygame.init()\n SURFACE_MAIN = pygame.display.set_mode((constants.GAME_WIDTH, constants.GAME_HEIGHT))\n constants.initialize_sprites()", "def setup(self):\n arcade.set_background_color(BACKGROUND_COLOR)\n\n self.sprite_list.append(self.ada)\n self.sprite_list.append(self.potato)", "def __init__(self):\n pygame.init()\n self.settings = Settings()\n\n #self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n #self.settings.screen_width = self.screen.get_rect().width\n #self.settings.screen_height = self.screen.get_rect().height\n\n self.screen = pygame.display.set_mode(\n (self.settings.screen_width, self.settings.screen_height ))\n pygame.display.set_caption(\"Alien Invasion\")\n\n # Create an instance to store game stats.\n self.stats = GameStats(self)\n # Create scoreboard\n self.scoreboard = Scoreboard(self)\n\n # Create objects\n self.ship = Ship(self)\n self.bullets = pygame.sprite.Group()\n self.aliens = pygame.sprite.Group()\n self._create_fleet()\n\n # Make play button\n self.play_button = Button(self, \"Play\")", "def setup(self):\n self.star_list = arcade.SpriteList()\n\n for i in range(50):\n # Create snowflake instance\n singlestar = Singlestar()\n # Add snowflake to snowflake list\n self.star_list.append(singlestar)\n\n # Don't show the mouse pointer\n self.set_mouse_visible(False)\n\n # Set the background color\n arcade.set_background_color(arcade.color.BLACK)", "def setup_play(self, reset=False):\n\n if reset:\n for sprite in self.all_sprites:\n sprite.kill()\n else:\n self.score = 0\n self.lives = START_LIVES\n self.game_state = InGameState.READY\n\n self.all_sprites = pg.sprite.Group()\n self.walls = pg.sprite.Group()\n 
self.blocks = pg.sprite.Group()\n self.diamonds = pg.sprite.Group()\n self.moving_blocks = pg.sprite.Group()\n self.enemies = pg.sprite.Group()\n self.stunned_enemies = pg.sprite.Group()\n\n # level = Level(path.join(level_dir, '1.txt'))\n level = Level(path.join(level_dir, 'c64_level1.txt'))\n level.load_level(self)\n LOGGER.debug(f\"No. enemies: {len(self.enemies)}, No. blocks: {len(self.blocks)}\")\n\n self.make_boundary_wall(level.grid_height, level.grid_width)\n\n self.timer = TIME_LIMIT\n pg.time.set_timer(TIMER, 1000)\n\n self.target_no_kills = 5\n self.kill_bonus = None\n self.diamond_bonus = None", "def __init__(self):\n pygame.init() # intializes background settings\n self.settings = Settings()\n\n # the self.screen obj creates a `surface` that represents game screen where elements can be drawn\n ### run in 1200 x 800 mode\n self.screen = pygame.display.set_mode((self.settings.screen_width, self.settings.screen_height)) \n \n ### run in fullscreen mode\n # self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n # self.settings.screen_width = self.screen.get_rect().width\n # self.settings.screen_height = self.screen.get_rect().height\n\n pygame.display.set_caption(\"Alien_Invasion\")\n\n # Create instance of game statistics & scoreboard\n self.stats = GameStats(self)\n self.sb = Scoreboard(self)\n\n # the self.ship instance is assigned to give Ship access to all game resourses via self parameter\n self.ship = Ship(self)\n self.bullets = pygame.sprite.Group() # similar to a list with extra features\n\n # create instance of alien\n self.aliens = pygame.sprite.Group()\n self._create_fleet()\n\n # Create a Play button\n self.play_button = Button(self, \"Play !\")", "def __init__(self):\n pygame.init()\n self.settings = Settings()\n\n self.screen = pygame.display.set_mode(\n (self.settings.screen_width, self.settings.screen_height))\n pygame.display.set_caption(\"Alien Invasion\")\n\n self.ship = Ship(self)\n self.ufos = pygame.sprite.Group()\n self.missiles = pygame.sprite.Group()\n\n # self.available_width = self.settings.screen_width\n # self.available_ufos = (self.available_width // (self.ufo_example.rect.width * 2))\n # self.available_height = self.settings.screen_height\n # self.available_rows = (self.available_height // (self.ufo_example.rect.height * 2) // 2)\n\n self.initial_ufo_location = 0", "def __init__(self):\n\t\tpygame.init()\n\t\tself.settings = Settings()\n\n\t\tself.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n\t\tself.settings.screen_width = self.screen.get_rect().width \n\t\tself.settings.screen_height = self.screen.get_rect().height\n\t\tpygame.display.set_caption(\"Pigeon Drop!\")\n\n\t\t# Create an instance to store game statistics,\n\t\t# and create a scoreboard.\n\t\tself.stats = GameStats(self)\n\t\tself.sb = Scoreboard(self)\n\n\t\tself.pigeon = Pigeon(self)\n\t\tself.droppings = pygame.sprite.Group()\n\t\tself.autos = pygame.sprite.Group()\n\n\t\tself._create_fleet()\n\n\t\t# Make the Play button.\n\t\tself.play_button = Button(self, \"Play\")", "def __init__(self):\n\n self.width = 10\n self.height = 10\n self.new_game()", "def __init__(self, width, height):\n super().__init__(width, height)\n arcade.set_background_color(arcade.color.SMOKY_BLACK)\n\n self.held_keys = set()\n\n \n # TODO: declare anything here you need the game class to track\n self.ship = Ship()\n self.asteroid_array = []\n self.bullets_list = []\n self.create_asteroids()", "def __init__(self, width, height):\n Game.__init__(self, width, height)", "def 
init(self):\n\n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n self.x=0\n self.y=0", "def __init__(self, x, y, opponent):\n super().__init__()\n root = os.path.dirname(os.path.realpath(__file__))\n path = root + utils.sep\n\n self.sprites = []\n if opponent == True:\n self.sprites.append(pygame.image.load(path + \"opponent_00.png\"))\n self.sprites.append(pygame.image.load(path + \"opponent_01.png\"))\n self.sprites.append(pygame.image.load(path + \"opponent_02.png\"))\n self.sprites.append(pygame.image.load(path + \"opponent_03.png\"))\n self.sprites.append(pygame.image.load(path + \"opponent_04.png\"))\n self.sprites.append(pygame.image.load(path + \"opponent_05.png\"))\n self.sprites.append(pygame.image.load(path + \"opponent_06.png\"))\n else:\n self.sprites.append(pygame.image.load(path + \"bike_00.png\"))\n self.sprites.append(pygame.image.load(path + \"bike_01.png\"))\n self.sprites.append(pygame.image.load(path + \"bike_02.png\"))\n self.sprites.append(pygame.image.load(path + \"bike_03.png\"))\n self.sprites.append(pygame.image.load(path + \"bike_04.png\"))\n self.sprites.append(pygame.image.load(path + \"bike_05.png\"))\n self.sprites.append(pygame.image.load(path + \"bike_06.png\"))\n self.stage = 0\n self.image = self.sprites[self.stage]\n self.X = x\n self.Y = y\n self.rect = self.image.get_rect(center=(self.X, self.Y))\n\n self.angle = 0", "def __init__(self):\n #Screen settings\n self.screen_width=1200\n self.screen_height=800\n self.bg_color=(230,230,230)\n #ship settings\n self.ship_limit=1\n #bullet settings\n self.bullet_width=300\n self.bullet_height=15\n self.bullet_color=(60,60,60)\n self.bullets_allowed=3\n #Alien settings\n self.fleet_drop_speed = 20\n \n \n #how quickly the game speeds up\n self.speedup_scale=1.1\n #how quickly the point values increase\n self.score_scale=1.5\n \n self.initialize_dynamic_settings()", "def __init__(self):\n\n # Call the parent class (Sprite) constructor\n super().__init__()\n\n self.image = PEOPLE\n\n # Update the position of this object by setting the values\n # of rect.x and rect.y\n self.rect = self.image.get_rect()", "def setup(self):\n self.player = arcade.Sprite(\"tank.png\", 0.5)\n self.num_coins = 30\n\n # Initialize brick_list as a SpriteList\n # Then use nested for loops to place rows of bricks. \n # (Hint: Outer for loop is y coordinate skipping every 150 pixels\n # Inner for loop is x coordinate skipping every 64 pixels)\n # Append bricks to brick_list\n\n\n\n\n \n # Initialize coin_list as a SpriteList\n self.coin_list = arcade.SpriteList()\n \n # PSEUDOCODE for how to place coins. \n # for each i in range of number of coins \n # create coin Sprite\n # set boolean variable successfully_placed to False\n # while not successfully_placed\n # set center_x and center_y randomly\n # compute collision lists for coin with bricks\n # AND coin with other coins (2 lists)\n # if both lists have 0 length, then we have successfully placed the coin\n # add coin to coin_list\n \n \n \n \n \n \n \n # initialize physics_engine", "def __init__(self):\n #Screen settings:\n self.screen_width = 1200\n self.screen_height = 800\n self.bg_color = (51,153,255)\n\n #Ship settings:\n self.ship_speed_factor = 25\n\n #Bullet settings:\n self.bullet_width = 50\n self.bullet_height = 5\n self.bullet_color = 60,60,60\n self.bullets_allowed = 5\n\n #Target settings:\n self.target_direction = 1 #Works like a flag. 
1 represents down, -1 represents up.\n\n #Missed shots allowed\n self.misses_starting_with = 3\n\n #Increases the speed of the target by this factor each time it is hit\n self.speedup_scale = 1.2\n\n #Initializes the dynamic settings\n self.initialize_dynamic_settings()", "def init(cls):\n\n cls.configs = yaml.load( file('../local/config.yaml') )\n cls.is_online = False\n cls.state = State.playing\n cls.classes = classes\n cls.guiclasses = guiclasses\n\n # set up pygame and init\n pygame.init()\n\n # Set up the window\n cls.screen = pygame.display.set_mode(\n tuple(cls.configs['options']['resolution']),\n 0,\n 32)\n classes.screen = cls.screen\n guiclasses.screen = cls.screen", "def setup(self):\n\n # Used to keep track of our scrolling\n self.view_bottom = 0\n self.view_left = 0\n\n # Keep track of the score\n self.score = 0\n\n # Create the Sprite lists\n self.player_list = arcade.SpriteList()\n self.wall_list = arcade.SpriteList()\n self.coin_list = arcade.SpriteList()\n\n # Set up the player, specifically placing it at these coordinates.\n # image_source = \":resources:images/animated_characters/female_adventurer/femaleAdventurer_idle.png\"\n self.player_list = arcade.SpriteList()\n self.player_sprite = Player()\n self.player_sprite.center_x = 256\n self.player_sprite.center_y = 256\n self.player_list.append(self.player_sprite)\n\n # --- Load in a map from the tiled editor ---\n\n # Name of map file to load\n map_name = r\"Math_Game\\floor_is_lava.tmx\"\n # Name of the layer in the file that has our platforms/walls\n platforms_layer_name = 'Platforms'\n\n # Read in the tiled map\n my_map = arcade.tilemap.read_tmx(map_name)\n\n # -- Platforms\n self.wall_list = arcade.tilemap.process_layer(map_object=my_map,\n layer_name='Platforms',\n base_directory=r'C:\\Users\\katel\\Desktop\\CSE310\\group_project\\Math_Game\\platformer-art-complete-pack-0\\Base pack\\Tiles',\n scaling=TILE_SCALING,\n use_spatial_hash=True, hit_box_algorithm=\"Simple\", hit_box_detail=4.5)\n\n # --- Other stuff\n # Set the background color\n if my_map.background_color:\n arcade.set_background_color(my_map.background_color)\n\n # Create the 'physics engine'\n self.physics_engine = arcade.PhysicsEnginePlatformer(self.player_sprite,\n self.wall_list,\n GRAVITY)", "def setUp(self):\r\n global ship_image\r\n self.spaceshipgame = SpaceShipGame()\r\n self.spaceshipgame.init()\r\n pos = [1,1]\r\n vel = [1,1]\r\n angle = 0\r\n image = ship_image\r\n\r\n center = [1,1]\r\n size = 1\r\n info = ImageInfo(center, size)\r\n\r\n self.ship = Ship( pos, vel, angle, image, info)", "def setUp(self):\r\n global ship_image\r\n self.spaceshipgame = SpaceShipGame()\r\n self.spaceshipgame.init()\r\n pos = [1,1]\r\n vel = [1,1]\r\n angle = 0\r\n image = ship_image\r\n\r\n center = [1,1]\r\n size = 1\r\n info = ImageInfo(center, size)\r\n\r\n self.ship = Ship( pos, vel, angle, image, info)", "def __init__(self):\n\n # Call the parent class (Sprite) constructor\n super().__init__()\n\n # Create an image of the block, and fill it with a color.\n # This could also be an image loaded from the disk.\n self.image = pygame.image.load('assets/' + 'singleLaser.png')\n\n # Fetch the rectangle object that has the dimensions of the image\n # image.\n # Update the position of this object by setting the values\n # of rect.x and rect.y\n self.rect = self.image.get_rect()\n self.rect.center = (settings.SCREEN_WIDTH / 2, settings.SCREEN_HEIGHT / 2)", "def run(self):\n pygame.init()\n pygame.display.set_caption(\"Genetic Game\")\n self.screen = 
pygame.display.set_mode((self.SCREEN_W, self.SCREEN_H), 0, 32)\n\n self.ominus_sprites = [OminusSprite(self.screen, o, PLAYERS_COLORS[o.id]) for o in self.model.get_players()]\n for o in self.ominus_sprites:\n self.agent_group.add(o)\n\n self.wall_sprites = [WallSprite(self.screen, w) for w in self.model.get_walls()]\n for w in self.wall_sprites:\n self.terrain_group.add(w)", "def __init__(self, sprite):\n self.sprite = sprite", "def __init__(self, player):\n self.platform_list = pygame.sprite.Group()\n self.enemy_list = pygame.sprite.Group()\n self.player = player\n \n \n # Background image\n self.background = None", "def __init__(self):\n\n self.frameCount = 0\n self._initScreen()\n self._initObjects()\n self._initControls()\n self._initLevel()\n self._start()\n print \"DEBUG: Initializing Game\"\n pass", "def __init__(self):\n pygame.init()\n self.rain_settings = RSettings()\n\n self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n self.rain_settings.screen_width = self.screen.get_rect().width\n self.rain_settings.screen_height = self.screen.get_rect().height\n pygame.display.set_caption(\"Raindrops\")\n\n self.rain = pygame.sprite.Group()\n\n self._create_rain()", "def setup(self):\n\n # Sprite lists\n self.all_sprites_list = arcade.SpriteList()\n self.wall_list = arcade.SpriteList()\n self.coin_list = arcade.SpriteList()\n self.ghost_list = arcade.SpriteList()\n\n # Set up the player\n self.score = 0\n self.coins_left = 25\n self.player_sprite = arcade.Sprite(\"pumpkin.png\", SPRITE_SCALING)\n self.player_sprite.center_x = 50\n self.player_sprite.center_y = 64\n\n\n self.boo_sprite = arcade.Sprite(\"boo.png\", 1)\n self.boo_sprite.set_position(500,500)\n\n self.game_over_sprite = arcade.Sprite(\"game_over.png\",1)\n self.game_over_sprite.set_position(500,500)\n\n \n\n#MAPPING START ###################################################\n\n\n mapArray = []\n\n mapFile = open(\"map.txt\",\"r\")\n\n content = mapFile.readline()\n\n line = 1\n\n while content:\n\n mapArray.append(content)\n\n content = mapFile.readline()\n\n \"\"\" SET UP THE MAIN MAP FILE \"\"\"\n MapFinal = []\n for row in range(32):\n MapRow = ['']\n for column in range(24):\n MapColumn = ['']\n MapRow.append(MapColumn)\n MapFinal.append(MapRow)\n\n for a in range(32):\n for b in range(24):\n if mapArray[a][b] == \"w\":\n MapFinal[a][b] = \"w\"\n elif mapArray[a][b] == \"t\":\n MapFinal[a][b] = \"t\"\n elif mapArray[a][b] == \"-\":\n MapFinal[a][b] = \"-\"\n\n\n for x in range(32):\n for y in range(24):\n\n if MapFinal[x][y] == 'w':\n x_block, y_block = locator(x,y)\n wall = arcade.Sprite(\"box.png\", BOX_SCALING)\n wall.center_x = x_block\n wall.center_y = y_block\n self.wall_list.append(wall)\n\n ## MAPPING END #############################################\n\n # -- Randomly place coins where there are no walls\n # Create the coins\n for i in range(NUMBER_OF_COINS):\n\n coin = arcade.Sprite(\"apple.png\", APPLE_SCALING)\n\n # --- IMPORTANT PART ---\n\n # Boolean variable if we successfully placed the coin\n coin_placed_successfully = False\n\n # Keep trying until success\n while not coin_placed_successfully:\n # Position the coin\n coin.center_x = random.randrange(SCREEN_WIDTH)\n coin.center_y = random.randrange(SCREEN_HEIGHT)\n\n # See if the coin is hitting a wall\n wall_hit_list = arcade.check_for_collision_with_list(coin, self.wall_list)\n\n # See if the coin is hitting another coin\n coin_hit_list = arcade.check_for_collision_with_list(coin, self.coin_list)\n\n if len(wall_hit_list) == 0 and 
len(coin_hit_list) == 0:\n # It is!\n coin_placed_successfully = True\n\n # Add the coin to the lists\n self.coin_list.append(coin)\n\n\n #Create the ghosts\n for i in range(NUMBER_OF_GHOSTS):\n\n ghost = arcade.Sprite(\"ghost.png\", GHOST_SCALING)\n ghost_placed_successfully = False\n while not ghost_placed_successfully:\n ghost.center_x = random.randrange(SCREEN_WIDTH)\n ghost.center_y = random.randrange(SCREEN_HEIGHT)\n\n wall_hit_list = arcade.check_for_collision_with_list(ghost, self.wall_list)\n coin_hit_list = arcade.check_for_collision_with_list(ghost, self.coin_list)\n ghost_hit_list = arcade.check_for_collision_with_list(ghost, self.ghost_list)\n player_hit_list = arcade.check_for_collision(ghost, self.player_sprite)\n \n if len(wall_hit_list)==0 and len(coin_hit_list)==0 and len(ghost_hit_list)== 0 and (player_hit_list)==0:\n ghost_placed_successfully = True\n\n self.ghost_list.append(ghost)\n\n\n\n # --- END OF IMPORTANT PART ---\n\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.wall_list)\n\n # Set the background color\n arcade.set_background_color(arcade.color.BLACK)", "def __init__(self, i, j):\n pygame.sprite.Sprite.__init__(self)\n #self.image = pygame.Surface([30,30])\n #self.image.fill(self.wallColor)\n self.image = pygame.image.load('stone_wall.png').convert_alpha()\n self.pos = (i*30,j*30,)\n self.rect = pygame.Rect(i*30,j*30,30,30)\n self._layer = 2", "def __init__(self, width, height, title):\r\n super().__init__(width, height, title)\r\n\r\n # door progress list\r\n self.doors_progress_list = None\r\n\r\n # door return list\r\n self.doors_return_list = None\r\n\r\n # wall list\r\n self.wall_list = None\r\n\r\n # npc list\r\n self.npc_list = None\r\n\r\n # boss list\r\n self.boss_list = None\r\n\r\n # EnemiesShoot\r\n self.enemies_shoot_list = None\r\n\r\n # Enemies\r\n self.enemies_list = None\r\n\r\n # locked blocks\r\n self.locked_blocks_list = None\r\n\r\n # breakable blocks\r\n self.breakable_blocks_list = None\r\n\r\n # Movable blocks\r\n self.movable_blocks_list = None\r\n\r\n # switch blocks\r\n self.switch_blocks_list = None\r\n\r\n # keys\r\n self.keys_list = None\r\n\r\n # hearts\r\n self.hearts_list = None\r\n\r\n # switches\r\n self.switches_list = None\r\n\r\n # moving platforms horizontal\r\n self.moving_plat_horizontal_list = None\r\n\r\n # moving platforms vertical\r\n self.moving_plat_vertical_list = None\r\n\r\n # bounce the platforms horizontal\r\n self.bounce_moving_plat_horizontal_list = None\r\n\r\n # bounce the platforms vertical\r\n self.bounce_moving_plat_vertical_list = None\r\n\r\n # platforms\r\n self.platforms_list = None\r\n\r\n # dont touch\r\n self.dont_touch_list = None\r\n\r\n # back ground list\r\n self.background_list = None\r\n\r\n # Sprite lists\r\n self.player_list = None\r\n\r\n # Set up the player\r\n self.player = None\r\n\r\n # physics engine\r\n self.physics_engine = None\r\n\r\n # map change\r\n self.map_change = 1\r\n\r\n # Used to keep track of our scrolling\r\n self.view_bottom = 0\r\n self.view_left = 0", "def __init__(self, width, height):\n self.width = width\n self.height = height\n self.plane = Plane(0,200)\n self.background = [Background(0),Background(1024),Background(2048)]\n self.bullets = []\n self.enemies = [Enemy(1080, randint(100,356))]", "def __init__(self):\n\n # load and set up pygame\n pygame.init()\n\n # create our window\n self.window = pygame.display.set_mode((520, 600))\n\n # clock for ticking\n self.clock = pygame.time.Clock()\n\n # set the window title\n 
pygame.display.set_caption(\"Pygame Tutorial 4 - Breakout\")\n\n # tell pygame to only pay attention to certain events\n # we want to know if the user hits the X on the window, and we\n # want keys so we can close the window with the esc key\n pygame.event.set_allowed([QUIT, KEYDOWN, MOUSEBUTTONDOWN])\n\n # make background\n self.background = pygame.image.load(os.path.join('images','background.jpg'))\n # blit the background onto the window\n self.window.blit(self.background, (0,0))\n # flip the display so the background is on there\n pygame.display.flip()\n\n # create sprite group for blocks\n self.blocks = pygame.sprite.RenderUpdates()\n\n # create sprite group for everything else\n self.sprites = pygame.sprite.RenderUpdates()\n\n # create our blockfactory object\n self.blockfactory = BlockFactory()\n\n # create a blank level\n self.resetLevel()\n\n # Save button sprite\n self.savebutton = Button((260,450), 'Save')\n self.sprites.add(self.savebutton)\n\n # feedback sprite\n self.feedback = TextSprite((260, 550), '')\n self.sprites.add(self.feedback)", "def __init__(self):\n pygame.init()\n\n self.settings = Settings()\n\n self.scenes = {\"menu\": MenuScene(),\n \"settings\": SettingsScene(),\n \"score\": ScoreScene(),\n \"game\": GameScene(),\n \"pause\": PauseScene(),\n \"game_over\": GameOverScene(),\n \"logo\": LogoScene()}\n self.scene_name = \"logo\" # start scene\n self.previous_scene_name = None\n self.scene = self.scenes[self.scene_name]\n\n self.__manager = ResourceManager()\n self.__display = self.settings.get_display()\n self.__display.fill(BACKGROUND_COLOR)\n pygame.display.flip()", "def __init__(self):\n\t\t# Screen size settings\n\t\t# Note that these values are commented out because we're using\n\t\t# full screen mode.\n\t\t#self.screen_width = 1200\n\t\t#self.screen_height = 600\n\n\t\t# Color definitions and background/color setting\n\t\tmidnight_blue = (0, 3, 36)\n\t\tblack = (0, 0, 0)\n\t\twhite = (255, 255, 255)\n\t\tself.bg_color = midnight_blue\n\n\t\tself.bg_image = pygame.image.load('images/space_bg.jpg')\n\n\t\t# Rocket settings\n\t\tself.max_speed = 3\n\t\tself.acceleration = 0.01\n\t\tself.rotation_speed = 3\n\t\t# Starts facing upwards\n\t\tself.rotation_angle = 271\n\n\t\t# Bullet settings\n\t\tself.bullet_speed = 8\n\t\tself.bullet_width = 3\n\t\tself.bullet_height = 15\n\t\tself.bullet_color = (60, 60, 60)\n\t\tself.bullets_allowed = 3", "def __init__(self):\n #Screen configuration\n self.screen_width = 1200\n self.screen_height = 680\n self.bg_color = (0,20,50)\n \n #Hero configuration\n #Increase of ship speed to 1.5 pixels instead of 1\n #self.hero_speed_factor = 1.5\n self.hero_limit = 3\n \n #Syringes (bullets) configuration\n #self.bullet_speed_factor = 1\n self.bullets_allowed = 5\n \n #Covids configuration\n self.covid_vertical_speed_factor = 1\n #The value of the movement is negative because it is increasing\n # from the right to the left\n #self.covid_horizontal_speed_factor = -10\n #The pandemy direction equals 1 means to the bottom; -1 means to the top\n # The randint ensures an randomly direction when starting the game\n #if randint(0,1) == 1:\n # self.pandemy_direction = 1\n #else:\n # self.pandemy_direction = -1\n\n #The rate that increases the game speed\n self.speedup_scale = 1.1\n \n self.initialize_dynamic_settings()", "def setup(self):\n self.poly2 = Polygon([(145, 60), (201, 69), (265, 46), (333, 61), (352, 99), (370, 129), (474, 138), (474, 178), (396, 225), (351, 275), (376, 312), (382, 356), (338, 368), (287, 302), (224, 304), (128, 
338), (110, 316), (129, 270), (83, 231), (65, 51), (83, 163), (103, 201), (90, 74), (126, 162)])\n self.poly2.set_direction(\"E\")\n self.poly1 = Polygon([(905, 328),(877, 367),(944, 413),(1004, 384),(1019, 307),(953, 248),(880, 250),(865, 278),(883, 325)])\n self.poly1.set_direction(\"SW\")\n self.poly3 = Polygon([(900, 600), (950,650), (1000, 500)])\n self.poly3.set_direction(\"N\")\n self.p1 = Point(485, 138)\n self.p1.set_direction(\"SE\")\n self.p2 = Point(self.width/2, self.height/2)\n self.p2.set_direction(\"NW\")\n self.p3 = Point(86,163)\n self.p3.set_direction(\"SE\")\n #a separate list for each different type of shape for collision purposes.\n self.polys = [self.poly1, self.poly2, self.poly3]\n self.points = [self.p1, self.p2, self.p3]", "def __init__(self):\r\n # Screen settings\r\n self.screen_width = 990\r\n self.screen_height = 990\r\n self.bg_color = (115, 204, 0)\r\n self.player_speed = 30\r\n self.enemy_speed = 45\r\n self.bomb_width = 90\r\n self.bomb_height = 90\r\n self.bomb_color = (96,96,96)\r\n self.max_bombs = 1\r\n self.bomb_radius = 45\r\n self.color_1 = (200, 200, 200)\r\n self.color_2 = (0, 0, 0)\r\n self.row_width = self.screen_width / 11\r\n self.col_width = self.screen_width / 11\r\n\r\n self.red_points = 0\r\n self.blue_points = 0\r\n\r\n self.wall_types = {\r\n 'wall': 1,\r\n 'barell': 2,\r\n }", "def setup(self):\n\n self.total_time = 0.0\n\n self.background = arcade.load_texture(\"images\\\\background-1_0 (1).png\")\n\n # Create the Sprite lists\n self.all_sprites_list = arcade.SpriteList()\n self.player_list = arcade.SpriteList()\n self.wall_list = arcade.SpriteList()\n self.coin_list = arcade.SpriteList()\n self.myobject_list = arcade.SpriteList()\n\n # Set up the player\n self.gameover = 0\n self.score = 0\n self.lives = 4\n self.collision_time = 0\n self.numobj = STARTING_OBJECTS_COUNT\n self.ncoins = COIN_COUNT\n self.player_sprite = VehicleSprite(\"images\\\\bugatti.png\",\n CHARACTER_SCALING)\n self.player_sprite.angle = 90\n # self.player_sprite.change_y = 1\n self.all_sprites_list.append(self.player_sprite)\n\n self.create_buddies()\n self.create_treasure()\n\n # Make the mouse disappear when it is over the window.\n # So we just see our object, not the pointer.\n\n # Set the background color\n arcade.set_background_color(arcade.color.ASH_GREY)\n\n # Set up the player, specifically placing it at these coordinates.\n # self.player_sprite = arcade.Sprite(\"images\\\\carcar.png\", CHARACTER_SCALING)\n # self.player_sprite.center_x = 500\n # self.player_sprite.center_y = 110\n # self.player_sprite.angle = 90\n # self.player_sprite.change_y = 1\n # self.player_list.append(self.player_sprite)\n\n # Create the 'physics engine'\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite,\n self.wall_list)\n\n # Set the viewport boundaries\n # These numbers set where we have 'scrolled' to.\n self.view_left = 0\n self.view_bottom = 0\n\n # For draw\n self.line_start = 0", "def init():\n global tube, ball, faceTextureName, woodTextureName\n tube = gluNewQuadric()\n gluQuadricDrawStyle(tube, GLU_FILL)\n ball = gluNewQuadric()\n gluQuadricDrawStyle(ball, GLU_FILL)\n\n # Set up lighting and depth-test\n glEnable(GL_LIGHTING)\n glEnable(GL_NORMALIZE) # Inefficient...\n glEnable(GL_DEPTH_TEST) # For z-buffering!\n\n generateCheckerBoardTexture()\n faceTextureName = loadImageTexture(\"brick.jpg\")\n woodTextureName = loadImageTexture(\"wood.jpg\")", "def __init__(self):\n\n self.score = 0\n self.game_over = False\n # Create sprite lists\n 
self.block_list = pygame.sprite.Group()\n self.all_sprites_list = pygame.sprite.Group()\n\n # Create the block sprites\n for i in range(50):\n block = Block()\n block.rect.x = random.randrange(SCREEN_WIDTH)\n block.rect.y = random.randrange(-300, SCREEN_HEIGHT)\n\n self.block_list.add(block)\n self.all_sprites_list.add(block)\n\n self.player = Player()\n self.all_sprites_list.add(self.player)", "def __init__(self):\n super().__init__()\n self.texture = arcade.load_texture(\":resources:/images/enemies/slimeBlue.png\")\n\n # Reset the viewport, necessary if we have a scrolling game and we need\n # to reset the viewport back to the start so we can see what we draw.\n arcade.set_viewport(0, constants.SCREEN_WIDTH - 1, 0, constants.SCREEN_HEIGHT - 1)", "def __init__(self, width, height):\n super().__init__(width, height)\n\n self.rifle = Rifle()\n self.score = 0\n\n self.bullets = []\n\n # Initialize the list of targets to an empty list\n self.targets = []\n\n # List of clouds\n self.clouds = []\n\n # I set the background color to sky blue to set up a background for the game\n arcade.set_background_color(arcade.color.SKY_BLUE)\n\n # Initialize this to zero to create variables. These will be changed in\n # the on_mouse_motion by setting them equal to the x and y of the mouse\n # which will be used in other functions\n self.mouse_x = 0.0\n self.mouse_y = 0.0\n\n # Determine the number of clouds to add to the list. This is initialized here\n # so that the game will start with a random number of clouds each time it's played\n self.num_clouds = random.randint(0, 5)\n # A loop to add Cloud objects to the list of clouds.\n for i in range(self.num_clouds):\n self.clouds.append(Cloud())", "def __init__(self, sprite_sheet_data):\n pygame.sprite.Sprite.__init__(self)\n\n sprite_sheet = SpriteSheet(\"resources/tiles_spritesheet.png\")\n # Grab the image for this platform\n self.image = sprite_sheet.get_image(sprite_sheet_data[0],\n sprite_sheet_data[1],\n sprite_sheet_data[2],\n sprite_sheet_data[3])\n\n self.rect = self.image.get_rect()", "def setup(self):\n # inicializamos el juego\n\n # Sprite lists\n self.player_list = arcade.SpriteList() # sera lista de personajes\n self.coin_list = arcade.SpriteList() # sera lista de monedas\n self.bullet_list = arcade.SpriteList() # lista de disparos\n\n # Set up the player\n self.score = 0\n\n # Image from kenney.nl\n # cargamos el sprite del jugador\n self.player_sprite = arcade.Sprite(\"character.png\", SPRITE_SCALING_PLAYER)\n # establecemos el inicio de posicion de nuestro jugador\n self.player_sprite.center_x = 50\n self.player_sprite.center_y = 70\n # lo agregamos a la lista de nuestros jugadores\n self.player_list.append(self.player_sprite)\n\n # Create the coins\n for i in range(COIN_COUNT):\n\n # Create the coin instance\n # Coin image from kenney.nl\n # cargamos las monedas\n coin = arcade.Sprite(\"coin_01.png\", SPRITE_SCALING_COIN)\n\n # Position the coin\n coin.center_x = random.randrange(SCREEN_WIDTH)\n coin.center_y = random.randrange(120, SCREEN_HEIGHT)\n\n # Add the coin to the lists\n # lo agregamos a la lista\n self.coin_list.append(coin)\n\n # Set the background color\n # esto aun nose para que sirve\n arcade.set_background_color(arcade.color.AMAZON)", "def register_shapes():\n turtle.Screen().register_shape(\"saphire.gif\")\n turtle.Screen().register_shape(\"player_right.gif\")\n turtle.Screen().register_shape(\"player_left.gif\")\n turtle.Screen().register_shape(\"walls.gif\")", "def create(self, pygame):\n\n white = (255,255,255)\n 
self.obstacle_img = pygame.image.load(\"./Images/Obstacle.png\").convert()\n self.obstacle_img.set_colorkey(white)\n\n for i in range(8):\n self.random_objects.append(pygame.image.load(\"./Images/Object{}.png\".format(i+1)).convert())\n # self.random_objects[i].set_colorkey(white)", "def __init__(self, sprite_sheet_data):\n pygame.sprite.Sprite.__init__(self)\n\n sprite_sheet = SpriteSheet(\"cookie.png\")\n # Grab the image for this platform\n self.image = sprite_sheet.get_image(sprite_sheet_data[0],\n sprite_sheet_data[1],\n sprite_sheet_data[2],\n sprite_sheet_data[3])\n\n\n self.rect = self.image.get_rect()", "def __init__(self):\n self.screen_width = 1200\n self.screen_height = 800\n self.bg_color = (0, 230, 0)\n\n # Glove Settings\n self.glove_move_speed = 0.25\n self.glove_size = 100\n\n # Ball Settings\n self.ball_move_speed = 0.25\n self.ball_size = 40", "def __init__(self):\n\n # Call the parent class (sprite) constructor\n super().__init__()\n # Create image of block and fill with color.\n self.image = pygame.Surface([20, 20])\n self.image.fill(BLACK)\n\n # Fetch rectangle object that has dimensions of image. Update position of object by setting values\n # of rect.x and rect.y\n self.rect = self.image.get_rect()", "def __init__(self):\n if platform == \"win32\":\n import ctypes\n user32 = ctypes.windll.user32\n screensize = user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)\n\n self.screen_width = screensize[0]\n self.screen_height = screensize[1] - 100\n elif platform == \"darwin\" or platform == \"linux\" or platform == \"linux2\":\n self.screen_width = 1200\n self.screen_height = 800\n\n self.bg_color = (230, 230, 230)\n\n self.ship_limit = 3\n\n self.fleet_drop_speed = 10\n\n # How quickly the game speeds up\n self.speed_up_scale = 1.1\n\n # How quickly the alien point values increase\n self.score_scale = 1.5\n\n self.initialize_dynamic_settings()\n\n self.bullet_width = 3\n self.bullet_height = 15\n self.bullet_color = (60, 60, 60)\n self.bullets_allowed = 3\n self.time_freeze = 0.5", "def __init__(self, path_to_json_settings=None):\n self.score = 0\n self.maxScore = 0\n try:\n with open(path_to_json_settings) as f:\n self.settings = json.load(f)\n except TypeError: # If file is not found, initialize with some basic settings\n self.settings = {\"screen_height\": 480, \"screen_width\": 480, \"grid_size\": 20, \"start_size\": 3, \"walls\": []}\n except FileNotFoundError:\n self.settings = {\"screen_height\": 480, \"screen_width\": 480, \"grid_size\": 20, \"start_size\": 3, \"walls\": []}\n\n # make sure that the values are set no matter what (i.e. 
json given as parameter, but does not have attributes)\n if \"screen_height\" not in self.settings:\n self.settings[\"screen_height\"] = 480\n if \"screen_width\" not in self.settings:\n self.settings[\"screen_width\"] = 480\n if \"grid_size\" not in self.settings:\n self.settings[\"grid_size\"] = 20\n if \"start_size\" not in self.settings:\n self.settings[\"start_size\"] = 480\n if \"walls\" not in self.settings:\n self.settings[\"walls\"] = []\n if \"start_size\" not in self.settings:\n self.settings[\"start_size\"] = 2\n\n self.settings[\"grid_size\"] = min(80, self.settings[\"grid_size\"])\n self.settings[\"screen_height\"] = min(768, self.settings[\"screen_height\"])\n self.settings[\"screen_width\"] = min(768, self.settings[\"screen_width\"])\n\n self.base_grid = self.init_grid() # Have a basic grid so we won't reinitialize it every time the player dies\n self.grid = deepcopy(self.base_grid)\n self.snake = Snake(self.grid, self.settings['start_size'])\n self.food = Food()\n\n # code needed to make pygame work\n pygame.init()\n pygame.display.set_caption('Snake!')\n snake_image = pygame.image.load('snake-icon.png')\n pygame.display.set_icon(snake_image)\n self.clock = pygame.time.Clock()\n self.screen = pygame.display.set_mode((self.settings[\"screen_width\"], self.settings[\"screen_height\"]),\n flags=pygame.SCALED, depth=32, vsync=True)\n self.block_width = self.settings[\"screen_width\"] / self.settings[\"grid_size\"]\n self.block_height = self.settings[\"screen_height\"] / self.settings[\"grid_size\"]\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n self.font = pygame.font.SysFont(\"monospace\", 20)", "def initialize(self) -> None:\n self.simulation = self.initialize_simulation()\n width, height = get_window_resolution()\n display_dim = ((0, width), (0, height))\n self.coord_mapper = CoordinateMapper2D(*self.simulation.dim, *display_dim)\n self.simple_pygame.all_sprites.empty()\n self.initialize_visualization()", "def __init__(self):\n\t\t# Screen setting.\n\t\tself.screen_width = 1200\n\t\tself.screen_height = 800\n\t\tself.bg_color = (230, 230, 230)\t\n\n\t\t#Ship setting\n\t\tself.ship_speed_factor = 10\t\t\n\t\tself.ship_limit = 3\t\t\t# number ship \n\n\t\t# Bullet setting.\n\t\tself.bullet_speed_factor = 3\n\t\tself.bullet_width = 3\n\t\tself.bullet_height = 15\n\t\tself.bullet_color = (60,60,60) #dark gray bullet\n\t\tself.bullets_allowed = 6\t\t# number bullet in screen\n\n\t\t#Alien setting.\n\t\tself.alien_speed_factor = 3\n\t\tself.fleet_drop_speed = 50\n\t\t# fleet_direction of 1 represents right; -1 represents left. \n\t\tself.fleet_direction = 1\n\n\t\t# Scoring\n\t\tself.alien_points = 50\n\n\t\t# How quickly the game speed ups\n\t\tself.speedup_scale = 1.1\n\t\tself.iniitialize_dynamic_settings()\n\t\t# How quickly score increase.\n\t\tself.score_scale = 1.5", "def setup_game(self):", "def __init__(self):\n\n # Change directory into the directory above this file - the\n # one containng the 'res' tree. 
Note that if we've been built via\n # py2exe, we will actually be in a zip file so account for that.\n path = os.path.dirname(os.path.dirname(__file__))\n if (os.path.basename(path) == \"library.zip\"):\n path = os.path.dirname(path)\n os.chdir( path )\n sys.path += [\".\"]\n\n # Services exposed to the entities.\n self.game_services = SpaceGameServices(self)\n\n # The resource loader.\n self.resource_loader = resource.ResourceLoader()\n\n # The configuration.\n if os.path.isfile(\"./config.txt\"):\n self.config = self.resource_loader.load_config_file_from(\"./config.txt\")\n else:\n self.config = self.resource_loader.load_config_file(\"base_config.txt\")\n\n # Create the renderer.\n renderer_name = self.config.get_or_default(\"renderer\", \"src.pygame_renderer.PygameRenderer\")\n renderer_class = utils.lookup_type(renderer_name)\n screen_size = (self.config.get_or_default(\"screen_width\", 1024),\n self.config.get_or_default(\"screen_height\", 768))\n self.renderer = renderer_class(screen_size, self.config, data_path=\"./res\")\n\n # The resource loaded needs a renderer to load images etc.\n self.resource_loader.set_renderer(self.renderer)\n\n # The input handling system.\n self.input_handling = None\n\n # The enemy.\n self.wave_spawner = None\n\n # Create the entity manager.\n self.entity_manager = ecs.EntityManager(self.game_services)\n\n # Configure the resource loader.\n self.resource_loader.set_minimise_image_loading(\n self.config.get_or_default(\"minimise_image_loading\", False)\n )\n\n # The drawing visitor.\n self.drawing = drawing.Drawing(self.game_services)\n\n # Is the game running?\n self.running = False\n\n # Should we load the game?\n self.want_load = False\n\n # Should we pause the game?\n self.want_pause = False\n\n # Should we unpause the game?\n self.want_resume = False\n\n # Should we simulate one frame and then pause?\n self.want_step = False", "def __init__(self, world_map, GRID_LOCK, coordinates=None):\n\n ''' Take parameters, and Sprite Constants '''\n super(BeesSprite, self).__init__(world_map, BeesSprite.IMAGE, GRID_LOCK,\n BeesSprite.HEALTH_BAR, BeesSprite.AVG_SPEED,\n BeesSprite.VISION, coordinates)\n\n self.type = \"bees\"\n self.prey = [\"plant\"]", "def __init__(self, gear_num, x, y):\n pygame.sprite.Sprite.__init__(self)\n \n self.__gear_num = gear_num\n \n if self.__gear_num == 0:\n self.image = pygame.image.load(\"./MiscImages/diamondchestplate.png\")\n elif self.__gear_num == 1:\n self.image = pygame.image.load(\"./MiscImages/diamondhelmet.png\")\n elif self.__gear_num == 2:\n self.image = pygame.image.load(\"./MiscImages/diamondsword.png\")\n elif self.__gear_num == 3:\n self.image = pygame.image.load(\"./MiscImages/diamondboots.png\")\n \n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y", "def __init__(self, ss_game):\n super().__init__()\n self.screen = ss_game.screen\n self.settings = ss_game.settings\n\n # Load the alien image and set its rect attribute.\n self.image = pygame.image.load('images/alien_ship.png')\n self.rect = self.image.get_rect()\n\n # Start each new alien at a random position on the right side\n # of the screen.\n self.rect.left = self.screen.get_rect().right\n # The farthest down the screen we'll place the alien is the height\n # of the screen, minus the height of the alien.\n alien_top_max = self.settings.screen_height - self.rect.height\n self.rect.top = randint(0, alien_top_max)\n\n # Store the alien's exact horizontal position.\n self.x = float(self.rect.x)", "def __init__(self):\n pygame.init()\n\n 
self.screen = pygame.display.set_mode((1200, 800))\n pygame.display.set_caption(\"Sideways Hero\")\n\n self.ship = Ship(self)\n\n # Make a group of three aliens.\n self.aliens = pygame.sprite.Group()\n for alien_num in range(3):\n new_alien = AlienShip(self)\n self.aliens.add(new_alien)\n\n pygame.mixer.music.load(\"sounds/explosion.mp3\")", "def __init__(self):\n GameObject.__init__(self)\n\n # private attributes to hold the properties so they appear read only\n self._bottle = None\n self._cowboy = None\n self._furnishing = None\n self._has_hazard = False\n self._is_balcony = False\n self._tile_east = None\n self._tile_north = None\n self._tile_south = None\n self._tile_west = None\n self._x = 0\n self._y = 0\n self._young_gun = None", "def __init__(self, width, height, title):\n ## INIT FUNCTION ##\n super().__init__(width, height, title)\n\n ## APPENDING THE SPRTIES ##\n self.shape_list = None\n self.num_key = 0\n\n self.win = arcade.load_texture(\"Numbers/won.png\")\n self.lost = arcade.load_texture(\"Numbers/lost.png\")\n\n # Define variables to check for completeness and accuracy\n self.done = False\n self.correct = False\n self.incorrect = False\n\n self.current_selected = None\n\n # If continuing saved game, convert strings from saved game file to lists and set equal to self.grid and self.fixed_answer\n if new == False:\n self.fixed_answer = Cameron.str_to_list(answer)\n self.grid = Cameron.str_to_list(progress)\n # If starting new game, generate unique board and save solution to text file\n elif new == True:\n self.board = SuDoku(SIZE, (DIV_ROW, DIV_COL), difficulty)\n self.answer = self.board.get_solution()\n self.grid = self.board.get_puzzle()\n self.fixed_answer = self.answer\n\n ## GENERATES BACKGROUND ##\n arcade.set_background_color(arcade.color.BLACK)\n self.recreate_grid()", "def __init__(self):\n # screen settings\n self.screen_width = 800\n self.screen_height = 600\n self.bg_color = (238, 238, 228)\n # ship settings\n self.ship_speed_factor = 0.9\n self.ship_limit = 3\n # bullet settings\n self.bullet_speed_factor = 0.5\n self.bullet_width = 3\n self.bullet_height = 15\n self.bullet_color = 64, 64, 64\n self.bullets_allowed = 3\n # aliens settings\n self.alien_speed_factor = 0.3\n self.fleet_drop_speed = 12\n # fleet derection right = 1, left = -1\n self.fleet_direction = 1", "def __init__(self):\n #Screen settings\n #orginal width: 1200, original height: 800\n self.screen_width = 1300\n self.screen_height = 750\n self.bg_color = (230, 230, 230)\n\n\n #ship settings\n self.ship_speed = 2\n self.ship_limit = 4\n\n #Bullet settings\n self.bullet_speed = 1.0\n self.bullet_width = 3\n self.bullet_height = 15\n self.bullet_color = (60, 60, 60) \n self.bullets_allowed = 3\n\n #Alien settings\n self.alien_speed = 0.5\n self.fleet_drop_speed = 30\n #fleet direction of 1 = right; -1 = left\n self.fleet_direction = 1", "def __init__(self):\n self._running = True\n self.size = self.width, self.height = 640, 400\n self.screen = pygame.display.set_mode(self.size, pygame.HWSURFACE)\n pygame.display.set_caption('Arkanoid')\n self.clock = pygame.time.Clock()\n self.tool = 'run'\n self.player = Circle()\n\n \"\"\"Finite-state maschine\"\"\"\n self.states = {'Game' : 0, 'Win' : 1, 'Lose' : 2}\n self.state = self.states['Game']\n\n \"\"\"Dictionary\"\"\"\n self.main_platform = Platform_main()\n self.platforms = {\n 0: Platform(),\n 1: Platform(x = 145, y = 10),\n 2: Platform(x = 285, y = 10),\n 3: Platform(x = 410, y = 10),\n 4: Platform(x = 520, y = 10),\n 5: Platform(x = 100, y = 100),\n 
6: Platform(x = 230, y = 100),\n 7: Platform(x = 350, y = 100),\n 8: Platform(x = 490, y = 100),\n 9: Platform(x = 50, y = 50),\n 10: Platform(x = 150, y = 50),\n 11: Platform(x = 270, y = 50),\n 12: Platform(x = 400, y = 50),\n }\n self.platformsx = {\n 0: Platformx(x = 70, y = 30),\n 1: Platformx(x = 560, y = 120),\n 2: Platformx(x = 220, y = 75),\n }\n self.to_remove = set()", "def __init__(self, width, height):\r\n super().__init__(width, height)\r\n\r\n self.rifle = Rifle()\r\n self.score = 0\r\n\r\n self.bullets = []\r\n\r\n # TODO: Create a list for your targets (similar to the above bullets)\r\n self.targets = []\r\n\r\n arcade.set_background_color(arcade.color.WHITE)", "def __init__(self, world_map, GRID_LOCK, coordinates=None):\n\n ''' Take parameters, and Sprite Constants '''\n super(EagleSprite, self).__init__(world_map, EagleSprite.IMAGE, GRID_LOCK,\n EagleSprite.HEALTH_BAR, EagleSprite.AVG_SPEED,\n EagleSprite.VISION, coordinates)\n\n self.type = \"eagle\"\n self.prey = [\"fish\"]\n self.movable_terrain = world_map.tile_types\n self.shadow = self.SHADOW_IMAGE\n self.shadow_tile = self.world_map.get_tile_by_index((self.tile.location_t[1] + 1, self.tile.location_t[0]))", "def __init__(self, s_width, s_height, setup):\n pygame.init()\n pygame.font.init()\n\n self.arcade = False\n fullscreen = False\n for opt in setup:\n if opt == Setup.Arcade:\n self.arcade = True\n elif opt == Setup.Fullscreen:\n fullscreen = True\n \n self.joysticks = [pygame.joystick.Joystick(x) for x in range(pygame.joystick.get_count())]\n for j in self.joysticks:\n j.init()\n\n self.display = Display((s_width, s_height), fullscreen)\n self.clock = pygame.time.Clock()\n self.FPS = 60\n\n self.ui = UI(self. display)\n if self.arcade:\n if len(self.joysticks) == 0: \n print(\"=================== plug in the controller ===================\") \n exit(1)\n self.ui.enable_arcade_mode()\n \n self.selected_speed = \"speed Medium\"\n self.game_manager = GameManager(self.display, self.ui, GameMode.EatToGrow, GameState.Menu)", "def __init__(self):\n # TODO: !!! EXPLAIN ALL THE TODO's in this file in the report !!!\n # Call the super class (SceneBase) initialization method. This\n # statement ensures that this class inherits its behaviour from its Superclass.\n # Abstract methods of all scenes (process_input(), update(), render(), etc.), screen\n # resolutions, text fonts, general text drawing methods and so on.\n super().__init__()\n\n # --------------------------------------------------------------------------------------------------------------\n # Pygame environment where all the sprites(images) are managed. 
This environment also displays\n # the text and is responsible for opening and closing new windows, checking button clicks and\n # event occurrences.\n\n # Initialize the environment and all the objects except the players objects:\n # terrain, borders, landing pad, stars, background, etc.\n self.space = pm.Space() # Pymunk space - the active game environment\n self.space.gravity = EARTH_GRAVITY # Adjust the environment characteristics\n self.borders() # Create the solid borders encapsulating the space\n self.random_terrain() # Generate the random terrain of the space\n\n # Initialize the Landing pad object (creates both Pymunk body,shape and Pygame sprite surface)\n self.landing_pad = LandingPad(self.screen_width, self.screen_height) # Pygame representation\n\n # Pymunk representation - A Pymunk segment object that is created based on the position of\n # the Pygame sprite\n self.pm_landing_pad = self.landing_pad.pymunk_pad(self.space, self.screen_height)\n self.game_controls = Controls.get_controls() # Fetch the game controls\n self.star_field = StarField(self.screen_width, self.screen_height) # The stars moving in the background\n self.background = pg.image.load(\"assets/frames/splash_BG.jpg\") # A background image\n self.release_time = 0 # Used for making the cooldown function of the shooter.\n # Between 0 and 120 frames (2 sec)\n # --------------------------------------------------------------------------------------------------------------\n # Pymunk space object which represents our physical, realistic world. It creates and\n # handles all the bodies and shapes behind the Pygame sprites. All the physical forces in the\n # Pymunk space act on the bodies of the objects and they then determine what the behaviour of\n # the sprites will be.\n\n self.anti_spacecraft = AntiSpaceCraft() # Anti-spacecraft vehicle instance\n self.spacecraft = Spacecraft() # Spacecraft instance\n\n # Collision handlers look for shapes with certain collision types\n # 2 -> spacecraft which is set in the class constructor\n # 3 -> missile which is set in the anti_spacecraft.create_missile() method\n # 4 -> wall segments\n self.missile_and_spacecraft_handler = self.space.add_collision_handler(2, 3)\n self.missile_and_terrain = self.space.add_collision_handler(4, 3)\n self.spacecraft_and_terrain_handler = self.space.add_collision_handler(2, 4)\n\n self.start_collision_handlers() # Set the 4 callback methods of the handlers\n self.add_objects_to_space() # Add spacecraft and anti-spacecraft Pymunk representations to space\n\n self.spacecraft_pts = 0 # Spacecraft player points attribute\n self.anti_spacecraft_pts = 0 # Anti-spacecraft player points attribute\n\n # Clock attributes to calculate the impulse strength to be applied to the missile\n self.end_time = 0\n self.start_time = 0\n self.clock_img = pg.image.load(\"assets/frames/timer.png\") # Load the clock icon image", "def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n \n self.image = pygame.image.load(\"./background stuff/space invader sign thing.png\")\n self.rect = self.image.get_rect()\n self.rect.center = (540, 200)", "def init_game():\n return BoardRenderer('LifeSim', GRID_SIZE, BLOCK_SIZE), World(GRID_SIZE, LAKE_SIZE, FOREST_WIDTH)", "def __init__(self, x, y, width, height,skin):\n self.rect = pygame.Rect(x, y, width, height)\n # la idea es eventualmente cambiar esto con image.get_rect() cuando hayan sprites, por eso las llamadas en #position\n\n # position\n self.rect.x = x\n self.rect.y = y\n\n # movement\n self.speed_x = 9 # esta es 
constante\n self.speed_y = 0 # esta varía\n self.left = False\n self.right = False\n self.rising = False\n self.falling = False\n self.push = False\n self.moveRight = False\n self.count = 10\n self.moveCount = 0\n self.fallingCount = 0\n\n #poderes\n self.forcePush = False\n self.doubleJump = False\n self.shield = False\n self.clock = 0\n self.clockStart = 0\n\n #arbol\n self.tree = None", "def setUp(self):\n self.game = BuildGame()\n self.effects = []", "def __init__(self):\n\n self.score = 0\n self.game_over = False\n\n # Create sprite lists\n self.all_sprites_list = pygame.sprite.Group()\n\n # Create the player\n self.player = Player(5, 5)\n self.all_sprites_list.add(self.player)", "def start(self):\n pygame.init()\n self.screen = pygame.display.set_mode((self.width, self.height))\n pygame.display.set_caption(\"PyStroke\")\n self.engines = [GameEngine(self.screen, self.e_e)] # add others here\n self.engine = self.engines[0]\n self.run()", "def on_init(self):\n pygame.init()\n self.background.load_from_file()\n self.hero.load_from_file()\n self.enemy.load_from_file()\n\n # Some music and sound fx\n # frequency, size, channels, buffersize\n # pygame.mixer.pre_init(44100, 16, 2, 4096)\n self.effect = pygame.mixer.Sound('sounds/bounce.wav')\n pygame.mixer.music.load('sounds/music.wav')\n pygame.mixer.music.play(-1)\n\n self.hero.screen = self.background.screen\n self.enemy.screen = self.background.screen\n self.clock = pygame.time.Clock()\n pygame.display.set_caption(\n 'Angry Floating Guy! World: {} (w to change world, arrows to move, Esc to quit).'.format(self.current_world.name))\n\n self._running = True", "def start(self):\n pygame.init()\n self.screen = pygame.display.set_mode((self.width, self.height))\n pygame.display.set_caption(\"PyStroke Demo\")\n self.engines = [DemoGameEngine(self.screen, self.e_e)] # add others here\n self.engine = self.engines[0]\n self.run()", "def __init__(self, width, height):\n super().__init__(width, height)\n\n self.ball = Ball()\n self.paddle = Paddle()\n self.score = 0\n\n # These are used to see if the user is\n # holding down the arrow keys\n self.holding_left = False\n self.holding_right = False\n\n arcade.set_background_color(arcade.color.WHITE)", "def initialise(self):\n #Initialize Everything\n pygame.init()\n self.screen = pygame.display.set_mode((640, 480))\n pygame.display.set_caption('VacuumFire')\n pygame.mouse.set_visible(0)\n #icon\n icon, foo = utils.load_image('icon.png')\n pygame.display.set_icon(icon)\n\n self.game_paused = False\n #sounds\n self.sounds = {};\n self.sounds['music'] = utils.load_sound('archivo.ogg')\n self.sounds['warning'] = utils.load_sound('warning.wav')\n self.sounds['powerup'] = utils.load_sound('powerup.wav')\n self.sounds['music'].play()\n #Create The Backgound\n self.background = Background(self.screen.get_size())\n #game variables\n self.score = 0\n #Display The Background\n self.screen.blit(self.background, (0, 0))\n pygame.display.flip()\n\n\n #The player's ship\n self.ship = Ship()\n #The player's ship\n self.lifemeter = LifeMeter()\n self.player = pygame.sprite.RenderPlain((self.ship))\n #group that stores all enemies\n self.enemies = pygame.sprite.Group()\n #group that stores all powerups\n self.powerups = pygame.sprite.Group()\n #group that stores all the lasers the player shoots\n self.fire = pygame.sprite.Group()\n #group for information sprites in the screen, should be rendered the last one\n self.hud = pygame.sprite.Group()\n self.explosions = pygame.sprite.Group()\n self.hud.add(self.lifemeter)\n 
#The level\n self.level = Stage('level_1')\n self.font = utils.load_font('4114blasterc.ttf', 36)\n\n\n self.clock = pygame.time.Clock()\n\n self.game_started = False\n self.game_finished = False", "def __init__(self, center, option, typ):\n pygame.sprite.Sprite.__init__(self)\n self.option = option\n self.typ = typ\n if self.option == 0:\n if self.typ == \"music\":\n self.image = loadImage(\"music.png\", True)\n elif self.typ == \"sound\":\n self.image = loadImage(\"sound.png\", True)\n elif self.typ == \"infty\":\n self.image = loadImage(\"infty.png\", True)\n\n elif self.option == 1:\n if self.typ == \"music\":\n self.image = loadImage(\"no_music.png\", True)\n elif self.typ == \"sound\":\n self.image = loadImage(\"no_sound.png\", True)\n elif self.typ == \"infty\":\n self.image = loadImage(\"no_infty.png\", True)\n\n self.rect = self.image.get_rect()\n self.rect.center = center", "def init():\n pygame.init()\n\n global display\n display = pygame.display.set_mode((GAME_WIDTH,GAME_HEIGHT),0,32)\n\n pygame.display.set_caption('Polygon Breeder')\n\n return create_initial_pictures()", "def init_pygame(self):\n # Startup the pygame system\n pygame.init()\n # Create our window\n self.screen = pygame.display.set_mode((Settings.width, Settings.height))\n # Set the title that will display at the top of the window.\n pygame.display.set_caption(self.title)\n # Create the clock\n self.clock = pygame.time.Clock()\n self.last_checked_time = pygame.time.get_ticks()\n # Startup the joystick system\n pygame.joystick.init()\n # For each joystick we find, initialize the stick\n for i in range(pygame.joystick.get_count()):\n pygame.joystick.Joystick(i).init()\n # Set the repeat delay for key presses\n pygame.key.set_repeat(Settings.key_repeat)\n # Create statistics font\n self.statistics_font = pygame.font.Font(None,30)" ]
[ "0.726422", "0.72362995", "0.72362995", "0.721465", "0.7195567", "0.71046937", "0.7090733", "0.7078919", "0.70652205", "0.7000575", "0.6993204", "0.6991222", "0.6986432", "0.6951341", "0.69142735", "0.69040245", "0.68818647", "0.6844078", "0.6843965", "0.68359494", "0.6834616", "0.68248737", "0.679973", "0.67883575", "0.67870337", "0.67856", "0.6767598", "0.67544746", "0.6717814", "0.6715695", "0.67051804", "0.670076", "0.6677002", "0.6673534", "0.66679937", "0.6658775", "0.6646469", "0.66348696", "0.66287744", "0.66287744", "0.6624351", "0.6623816", "0.66237175", "0.66135746", "0.66120297", "0.6611421", "0.6605651", "0.66047543", "0.6576729", "0.6564303", "0.65602", "0.6552071", "0.65486634", "0.6545061", "0.6542544", "0.65313333", "0.6530595", "0.6526283", "0.65257764", "0.6524497", "0.6523048", "0.652226", "0.6520199", "0.6505985", "0.6505147", "0.6502206", "0.6501708", "0.6486886", "0.64842826", "0.6484215", "0.64825416", "0.6476977", "0.6469982", "0.64668894", "0.6460436", "0.645927", "0.6455972", "0.6450273", "0.6445711", "0.64292854", "0.64259213", "0.64252347", "0.6424014", "0.64225566", "0.64176905", "0.64110136", "0.64100915", "0.6405592", "0.64053977", "0.6394548", "0.63757104", "0.63751644", "0.6371118", "0.6366984", "0.63618857", "0.6356449", "0.6354364", "0.6352573", "0.63511026", "0.63504297" ]
0.7013884
9
Instead of rendering each wall block, we create a single shape which can be drawn in a single call, rather than a call for each wall block
def create_wall_shape(self):
    self.shape_walls = arcade.ShapeElementList()
    self.shape_walls.center_x = 0
    self.shape_walls.center_y = 0
    self.shape_walls.angle = 0

    point_list = []
    color_list = []

    # create the walls into a single shape
    walls = self.game.walls
    for wall in walls:
        points = self.get_entity_dimensions(wall)
        point_list.append(points[0])
        point_list.append(points[1])
        point_list.append(points[2])
        point_list.append(points[3])
        # as we have 4 points
        for i in range(4):
            color_list.append(COLOUR_MAP[wall.base_colour])

    self.shape_walls.append(
        arcade.create_rectangles_filled_with_colors(point_list, color_list)
    )
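A minimal usage sketch of how the batched shape is then rendered with the Arcade library: assuming create_wall_shape above belongs to an arcade.Window (or View) subclass and has already been called during setup, the whole wall geometry is emitted in one draw call instead of one call per wall block. The GameWindow name and the arcade.start_render() call follow Arcade 2.x conventions and are assumptions, not part of the snippet above.

import arcade

class GameWindow(arcade.Window):
    # assumes create_wall_shape() above is part of this class and was
    # called once during setup, so self.shape_walls is already built

    def on_draw(self):
        arcade.start_render()
        # one draw call renders every batched wall rectangle,
        # instead of a separate call for each wall block
        self.shape_walls.draw()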
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_walls(self):\n for x in range(self.width):\n self.add_thing(Wall(), (x, 0))\n self.add_thing(Wall(), (x, self.height - 1))\n\n for y in range(self.height):\n self.add_thing(Wall(), (0, y))\n self.add_thing(Wall(), (self.width - 1, y))", "def _draw_walls(self, draw_grid):\n for yi, y in enumerate(self._grid):\n for xi, x in enumerate(y):\n for i, w in enumerate(x.walls):\n if i == 0 and w:\n draw_grid[yi * 2 + 1][xi * 2] = self._wall_color\n if i == 1 and w:\n draw_grid[yi * 2 + 1][xi * 2 + 2] = self._wall_color\n if i == 2 and w:\n draw_grid[yi * 2][xi * 2 + 1] = self._wall_color\n if i == 3 and w:\n draw_grid[yi * 2 + 2][xi * 2 + 1] = self._wall_color\n return draw_grid", "def add_walls(self):\n for x in range(self.width + 1):\n if not self.some_things_at((x, 0), Wall):\n self.add_thing(Wall(), (x, 0))\n if not self.some_things_at((x, self.height), Wall):\n self.add_thing(Wall(), (x, self.height))\n\n for y in range(self.height + 1):\n if not self.some_things_at((0, y), Wall):\n self.add_thing(Wall(), (0, y))\n if not self.some_things_at((self.width, y), Wall):\n self.add_thing(Wall(), (self.width, y))\n #self.add_thing(Wumpus(),(1,3))\n #self.add_thing(Pit(),(3,3))\n #self.add_thing(Pit(),(3,1))\n #self.add_thing(Gold(),(2,3))\n #self.add_thing(Pit(),(4,4))", "def build_blocks():\n block_1 = GRect(375, 80, x=20, y=330)\n block_1.filled = True\n block_1.color = 'firebrick'\n block_1.fill_color = 'firebrick'\n window.add(block_1)\n block_2 = GRect(375, 80, x=405, y=330)\n block_2.filled = True\n block_2.color = 'steelblue'\n block_2.fill_color = 'steelblue'\n window.add(block_2)\n block_3 = GRect(375, 80, x=20, y=420)\n block_3.filled = True\n block_3.color = 'goldenrod'\n block_3.fill_color = 'goldenrod'\n window.add(block_3)\n block_4 = GRect(375, 80, x=405, y=420)\n block_4.filled = True\n block_4.color = 'forestgreen'\n block_4.fill_color = 'forestgreen'\n window.add(block_4)\n block_5 = GRect(60, 40, x=720, y=120)\n block_5.filled = True\n block_5.color = 'dodgerblue'\n block_5.fill_color = 'dodgerblue'\n window.add(block_5)\n circle_1 = GOval(90, 90, x=20, y=170)\n circle_1.filled = True\n circle_1.color = 'blueviolet'\n circle_1.fill_color = 'blueviolet'\n window.add(circle_1)", "def _create_main_shape(self):\n\n a, b = gc( self.size/2,\n self._ZERO_DEGREES - self.angle,\n self._180_DEGREES + self.angle)\n self.wafer_points = zip(a,b)\n self.wafer_polygon = gdspy.Polygon(self.wafer_points, self.WAFER_LAYER)\n self.cell.add(self.wafer_polygon)", "def build_wall(self, type, pos1, pos2, thickness=1):\n raise NotImplementedError", "def make_boundary_wall(self, height, width) -> None:\n for x in range(0, width):\n Wall(self, x, 0)\n Wall(self, x, height - 1)\n for y in range(1, height - 1):\n Wall(self, 0, y)\n Wall(self, width - 1, y)", "def corridor(x,z, emap, width=10, length=10, height=10, details=None, walls=\"ns\", name=\"wall\", mergeshape=None):\r\n global wallnum\r\n n = z + width / 2\r\n s = z - width / 2\r\n e = x + length / 2\r\n w = x - length / 2\r\n\r\n solid_objects = []\r\n\r\n if \"n\" in walls:\r\n # TODO: abstract out the mostly-duplicate code in these cases...\r\n nwall = SolidObject(name+str(wallnum),\r\n Size(length, height, 1),\r\n Position(x, emap.calcHeight(x, z) + height / 2, n-0.5), 0)\r\n solid_objects.append(nwall)\r\n nwallmodel = createMyCuboid(nwall.w() * 2, nwall.h() * 2, nwall.d() * 2,\r\n name=name+str(wallnum),\r\n x=nwall.x(),y=nwall.y(),z=nwall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n 
mergeshape.add(nwallmodel)\r\n else:\r\n nwall.setmodel(nwallmodel, details)\r\n\r\n\r\n wallnum += 1\r\n\r\n if \"s\" in walls:\r\n swall = SolidObject(name+str(wallnum), Size(length, height, 1), Position(x, emap.calcHeight(x, z)+height / 2, s+0.5), 0)\r\n solid_objects.append(swall)\r\n swallmodel = createMyCuboid(swall.w()*2, swall.h()*2, swall.d()*2,\r\n name=name+str(wallnum),\r\n x=swall.x(), y=swall.y(), z=swall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0,cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(swallmodel)\r\n else:\r\n swall.setmodel(swallmodel, details)\r\n\r\n wallnum += 1\r\n\r\n if \"e\" in walls:\r\n ewall = SolidObject(name+str(wallnum), Size(1, height, width), Position(e-0.5, emap.calcHeight(x, z)+height / 2, z), 0)\r\n solid_objects.append(ewall)\r\n ewallmodel = createMyCuboid(ewall.w()*2, ewall.h()*2, ewall.d()*2,\r\n name=name+str(wallnum),\r\n x=ewall.x(), y=ewall.y(), z=ewall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(ewallmodel)\r\n else:\r\n ewall.setmodel(ewallmodel, details)\r\n\r\n wallnum += 1\r\n\r\n if \"w\" in walls:\r\n wwall = SolidObject(name+str(wallnum), Size(1, height, width), Position(w+0.5, emap.calcHeight(x, z)+height / 2, z), 0)\r\n solid_objects.append(wwall)\r\n wwallmodel = createMyCuboid(wwall.w()*2, wwall.h()*2, wwall.d()*2,\r\n name=name+str(wallnum),\r\n x=wwall.x(), y=wwall.y(), z=wwall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(wwallmodel)\r\n else:\r\n wwall.setmodel(wwallmodel, details)\r\n wallnum += 1\r\n\r\n if \"o\" not in walls:\r\n ceiling = SolidObject(name+str(wallnum), Size(length, 1, width), Position(x, emap.calcHeight(x, z)+height+0.5, z), 0)\r\n solid_objects.append(ceiling)\r\n ceilingmodel = createMyCuboid(ceiling.w()*2, ceiling.h()*2, ceiling.d()*2,\r\n name=name+str(wallnum),\r\n x=ceiling.x(), y=ceiling.y(), z=ceiling.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(ceilingmodel)\r\n else:\r\n ceiling.setmodel(ceilingmodel, details)\r\n\r\n wallnum += 1\r\n\r\n return solid_objects", "def draw_long_shape():\n turtle.fillcolor('blue')\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.back(150)", "def __init__(self, mapfile, xpos, zpos, emap, width=10.0, depth=10.0, height=10.0, name=\"building\", draw_details=None, yoff=0.0, scheme=None):\r\n self.xpos = xpos\r\n self.zpos = zpos\r\n self.width = width\r\n self.depth = depth\r\n self.height = height\r\n self.name = name\r\n self.ceilingthickness = 1.0\r\n self.walls = []\r\n\r\n if scheme == None:\r\n self.scheme = Building.baseScheme\r\n else:\r\n self.scheme = scheme\r\n\r\n # We don't have to be rigorous here, this should only be a draw_details or an iterable of draw_details.\r\n if hasattr(draw_details, \"__getitem__\") or hasattr(draw_details, \"__iter__\"):\r\n assert (len(draw_details) == self.scheme[\"#models\"])\r\n self.details = draw_details\r\n else:\r\n self.details = [draw_details for x in range(self.scheme[\"#models\"])]\r\n # having a method like this allows draw details to be set later\r\n\r\n self.yoff = yoff\r\n\r\n self.model = [MergeShape(name=name+\".\"+str(x)) for x in range(self.scheme[\"#models\"])]\r\n\r\n if mapfile[0] != '/':\r\n mapfile = sys.path[0] + '/' + mapfile\r\n print(\"Loading building map ...\", mapfile)\r\n\r\n im = Image.open(mapfile)\r\n im = ImageOps.invert(im)\r\n ix,iy = im.size\r\n\r\n 
print(\"image size\", ix, \",\", iy)\r\n\r\n startx = xpos - ix / 2 * width\r\n starty = zpos - ix / 2 * depth\r\n\r\n yoff += emap.calcHeight(-xpos,-zpos)\r\n\r\n if not im.mode == \"P\":\r\n im = im.convert('P', palette=Image.ADAPTIVE)\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\r\n pixels = im.load()\r\n\r\n for y in range(1,iy-1):\r\n print(\".\", end='')\r\n for x in range(1,ix-1):\r\n colour = pixels[x,y]\r\n\r\n if x == 1:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x-1,y], \"edge\"), wallfunc=self.west_wall, ceilingedgefunc=self.west_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x-1,y]), wallfunc=self.west_wall, ceilingedgefunc=self.west_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if x == ix-2:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x+1,y], \"edge\"), wallfunc=self.east_wall, ceilingedgefunc=self.east_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x+1,y]), wallfunc=self.east_wall, ceilingedgefunc=self.east_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if y == 1:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y-1], \"edge\"), wallfunc=self.south_wall, ceilingedgefunc=self.south_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y-1]), wallfunc=self.south_wall, ceilingedgefunc=self.south_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if y == iy-2:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x, y+1], \"edge\"), wallfunc=self.north_wall, ceilingedgefunc=self.north_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y+1]), wallfunc=self.north_wall, ceilingedgefunc=self.north_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n self._executeScheme(x, y, startx, starty, (colour, None), wallfunc=None, ceilingedgefunc=None, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n self.set_draw_details(self.details) # after models created otherwise\r\n # details lost by merging\r", "def create_outer_walls(space,width,height):\n static_lines = [pymunk.Segment(space.static_body, (0.0, 0.0), (width, 0.0), 0.0),\n pymunk.Segment(space.static_body, (width, 0.0), (width, height), 0.0),\n pymunk.Segment(space.static_body, (width, height), (0.0, height), 0.0),\n pymunk.Segment(space.static_body, (0.0, 600.0), (0.0, 0.0), 0.0)]\n for line in static_lines:\n line.friction = 0.5\n line.elasticity = 0.9\n\n return static_lines", "def create(self):\n\t\t# Pick a random starting position not on the parameter\n\t\tx = random.randint(1, self.width - 2)\n\t\ty = random.randint(1, self.height - 2)\n\n\t\t# Set node as floor and adjacent nodes as walls\n\t\tself.setFloor(x, y)\n\t\tself.setWall(x - 1, y)\n\t\tself.setWall(x + 1, y)\n\t\tself.setWall(x, y - 1)\n\t\tself.setWall(x, y + 1)\n\n\t\t# Create list of wall positions\n\t\tself._walls = []\n\t\tself._walls.append((x - 1, y))\n\t\tself._walls.append((x + 1, y))\n\t\tself._walls.append((x, y - 1))\n\t\tself._walls.append((x, y + 1))\n\t\t\n\t\twhile self._walls:\n\t\t\t# Pick random wall position\n\t\t\tx, y = random.choice(self._walls)\n\n\t\t\t# Check if this node divides an empty node and a floor node\n\t\t\tif (x > 0 and x < self.width - 1) and (y > 0 and y < self.height - 
1):\n\t\t\t\tif ((self._isEmpty(x - 1, y) and self.isFloor(x + 1, y))\n\t\t\t\tor (self._isEmpty(x + 1, y) and self.isFloor(x - 1, y))\n\t\t\t\tor (self._isEmpty(x, y - 1) and self.isFloor(x, y + 1))\n\t\t\t\tor (self._isEmpty(x, y + 1) and self.isFloor(x, y - 1))):\n\t\t\t\t\t# Check there are less than 2 adjacent floor nodes\n\t\t\t\t\tif self.countAdjacentFloorNodes(x, y) < 2:\n\t\t\t\t\t\t# Set current node as a floor\n\t\t\t\t\t\tself.setFloor(x, y)\n\n\t\t\t\t\t\t# Set adjacent empty tiles to walls and add to list of wall positions\n\t\t\t\t\t\tif x > 0:\n\t\t\t\t\t\t\tself._makeWall(x - 1, y)\n\t\t\t\t\t\tif x < self.width - 1:\n\t\t\t\t\t\t\tself._makeWall(x + 1, y)\n\t\t\t\t\t\tif y > 0:\n\t\t\t\t\t\t\tself._makeWall(x, y - 1)\n\t\t\t\t\t\tif y < self.height - 1:\n\t\t\t\t\t\t\tself._makeWall(x, y + 1)\n\n\t\t\t# Remove the current position from the list of wall positions\n\t\t\tfor wall in self._walls:\n\t\t\t\tif (wall[0] == x and wall[1] == y):\n\t\t\t\t\tself._walls.remove(wall)\n\t\t\n\t\t# Fill in any empty nodes as walls\n\t\tfor y in range(self.height):\n\t\t\tfor x in range(self.width):\n\t\t\t\tif self._isEmpty(x, y):\n\t\t\t\t\tself.setWall(x, y)", "def draw(self):\n if self.master != None :\n fill = Cell.FILLED_COLOR_BG\n outline = Cell.FILLED_COLOR_BORDER\n\n if not self.fill:\n fill = Cell.EMPTY_COLOR_BG\n outline = Cell.EMPTY_COLOR_BORDER\n walls[self.ord][self.abs] = 0\n else:\n walls[self.ord][self.abs] = 1\n\n\n xmin = self.abs * self.size\n xmax = xmin + self.size\n ymin = self.ord * self.size\n ymax = ymin + self.size\n self.master.create_rectangle(xmin, ymin, xmax, ymax, fill = fill, outline = outline)", "def build_wall(self): #py:UR.build_wall\n RUR._UR.build_wall_(self.body)", "def draw_block():\n turtle.down()\n turtle.begin_fill()\n turtle.pensize(3)\n turtle.forward(50)\n turtle.left(90)\n turtle.forward(50)\n turtle.left(90)\n turtle.forward(50)\n turtle.left(90)\n turtle.forward(50)\n turtle.left(90)\n turtle.end_fill()\n turtle.up()", "def empty_diff_walls():\n\t# 4 side walls are absorptive\n\troom_materials = [pra.Material(energy_absorption=0.1, scattering=None)] * 4\n\t# floor and ceiling are reflective\n\troom_materials.extend([pra.Material(energy_absorption=0.98, scattering=None)] * 2)\n\t\n\troom_faces = make_polygon(\n\t\tcentre=[0,0,2.5],\n\t\tradius=10,\n\t\theight=5,\n\t\tN=4,\n\t\trpy=[0,0,np.pi/4]\n\t)\n\n\t# create room\n\twalls = []\n\twalls.extend(create_walls(room_faces, room_materials))\n\n\troom = pra.Room(walls, fs=fs, max_order=3, ray_tracing=False, air_absorption=False)\n\n\troom.add_source([-5, 2, 2.])\n\troom.add_microphone([1, 0, 2.])\n\n\t# compute rir\n\troom.image_source_model()\n\troom.compute_rir()\n\n\treturn room", "def draw_block_element(self, cr, x, y):\n cr.rectangle(\n self.wall_width+x*self.block_size, \n (self.block_height-y-1)*self.block_size, \n self.block_size, self.block_size\n )\n \n cr.set_source_rgb(0.2, 0.25, 0.5)\n cr.fill_preserve()\n\n cr.set_source_rgb(0.8,0.8,0.8)\n cr.set_line_width(self.block_size/10)\n cr.stroke()", "def draw_house_walls(x, y, width, height):\n print('Типа рисую стены...', x, y, width, height)", "def circlePrimitive(self):\n\n def drawWireframe(self):\n\n # Draw line\n glFT.glBegin(OpenMayaRender.MGL_LINE_STRIP)\n\n # Get Color\n # self.getColor(\"edge\")\n\n def_circle = 100\n for idx in range(def_circle + 1):\n\n theta = 2 * 3.141592 * idx / def_circle\n x = self.shape_size * math.cos(theta)\n z = self.shape_size * math.sin(theta)\n\n point = OpenMaya.MVector(x, 0.0, z)\n point = 
self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n\n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n \n def drawShaded(self):\n\n # Draw quad\n glFT.glBegin(OpenMayaRender.MGL_POLYGON)\n\n # Get Color\n # self.getColor(\"polygon\")\n\n def_circle = 100\n for idx in range(def_circle + 1):\n\n theta = 2 * 3.141592 * idx / def_circle\n x = self.shape_size * math.cos(theta)\n z = self.shape_size * math.sin(theta)\n\n point = OpenMaya.MVector(x, 0.0, z)\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n\n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n\n glFT.glNewList(self.vList_id, OpenMayaRender.MGL_COMPILE)\n\n # Draw lines\n if self.draw_type == 0 or self.draw_type == 2:\n drawWireframe(self)\n \n # Draww Polygons\n if self.draw_type == 1 or self.draw_type == 2:\n drawShaded(self)\n\n glFT.glEndList()", "def vizualize_wall(self):\n\n\t\t#Points are converted from polar to cartesian here\n\t\tpoint1 = Point()\n\t\t#(-math.pi/4) represents the 45 degree rotation of the front point\n\t\t#from the front of the robot\n\t\tpoint1.x = math.cos((-math.pi/4))*self.front_point\n\t\tpoint1.y = math.sin((-math.pi/4))*self.front_point\n\t\tpoint2 = Point()\n\t\t#(-3*math.pi/4) represents the back point's 90 degree rotaion from\n\t\t#the front point\n\t\tpoint2.x = math.cos((-3*math.pi/4))*self.back_point\n\t\tpoint2.y = math.sin((-3*math.pi/4))*self.back_point\n\t\tmy_marker = Marker(type=Marker.LINE_STRIP)\n\t\tmy_marker.header.frame_id = \"base_link\"\n\t\tmy_marker.color.a = 1\n\t\tmy_marker.scale.x = .1\n\t\tmy_marker.points = [point1, point2]\n\t\tself.visualizer.publish(my_marker)", "def render_wall(win, color, direction, pos):\n x, y = pos\n\n if direction == 'S':\n width = CELL_SIZE\n height = BORDER\n x = x*CELL_SIZE\n y = (y+1)*CELL_SIZE\n\n elif direction == 'E':\n width = BORDER\n height = CELL_SIZE\n x = (x+1)*CELL_SIZE\n y = y*CELL_SIZE\n\n pygame.draw.rect(win, color, (x, y, width, height))", "def south_wall(self, x, y, z, width=10, length=10, height=10, details=None, name=\"wall\", mergeshape=None):\r\n global wallnum\r\n n = z + width / 2\r\n s = z - width / 2\r\n e = x + length / 2\r\n w = x - length / 2\r\n\r\n swall = SolidObject(name+str(wallnum), Size(length, height, 1), Position(x, y+height / 2, s), 0)\r\n self.walls.append(swall)\r\n model = Plane(w=swall.w()*2, h=swall.h()*2, name=name+str(wallnum))\r\n mergeshape.add(model, swall.x(),swall.y(),swall.z(), rx=0.0,ry=0.0,rz=0.0)\r\n\r\n wallnum += 1", "def squarePrimitive(self):\n\n def drawWireframe(self):\n\n # Draw line\n glFT.glBegin(OpenMayaRender.MGL_LINE_STRIP)\n\n # Get Color\n # self.getColor(\"edge\")\n\n for edges in SQUARE[\"EDGES\"]:\n for edge in edges:\n\n point = OpenMaya.MVector(edge[0], edge[1], edge[2])\n point *= self.shape_size\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n \n \n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n\n def drawShaded(self):\n\n # Draw quad\n glFT.glBegin(OpenMayaRender.MGL_POLYGON)\n\n # Get Color\n # self.getColor(\"polygon\")\n\n for polygons in SQUARE[\"POLYGONS\"]:\n for polygon in polygons:\n\n point = OpenMaya.MVector(polygon[0], polygon[1], polygon[2])\n point *= self.shape_size\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point 
= self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n\n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n\n glFT.glNewList(self.vList_id, OpenMayaRender.MGL_COMPILE)\n\n # Draw lines\n if self.draw_type == 0 or self.draw_type == 2:\n drawWireframe(self)\n \n # Draww Polygons\n if self.draw_type == 1 or self.draw_type == 2:\n drawShaded(self)\n\n glFT.glEndList()", "def add_to_default_batch(self):\n\n '''\n self.shape = shared.batch.add(4, gl.GL_QUADS, None,\n ('v2f', (self.x, self.y,\n self.x + self.width, self.y,\n self.x + self.width, self.y + self.height,\n self.x, self.y + self.height)))\n \n numPoints = 50\n verts = []\n for i in range(numPoints):\n angle = math.radians(float(i)/numPoints * 360.0)\n x = self.radius*cos(angle) + self.x\n y = self.radius*sin(angle) + self.y\n verts += [int(x),int(y)]\n \n '''\n data = create_circle(self.x, self.y, self.radius, shared.batch)\n\n self.shape = shared.batch.add_indexed(data[0], data[1], data[2], data[3], data[4], data[5])\n\n #self.shape = shared.batch.add(numPoints, gl.GL_POLYGON, None,\n # ('v2f', verts))", "def west_wall(self, x, y, z, width=10, length=10, height=10, details=None, name=\"wall\", mergeshape=None):\r\n global wallnum\r\n n = z + width / 2\r\n s = z - width / 2\r\n e = x + length / 2\r\n w = x - length / 2\r\n\r\n wwall = SolidObject(name+str(wallnum), Size(1, height, width), Position(w, y+height / 2, z), 0)\r\n self.walls.append(wwall)\r\n model = Plane(w=wwall.d()*2, h=wwall.h()*2, name=name+str(wallnum))\r\n mergeshape.add(model, wwall.x(),wwall.y(),wwall.z(),rx=0.0,ry=90.0,rz=0.0)\r\n\r\n wallnum += 1", "def roof(self, x, y, z, width=10, length=10, height=10, details=None, name=\"wall\", mergeshape=None, makeroof=True, makeceiling=True):\r\n global wallnum\r\n\r\n roof = SolidObject(name+str(wallnum), Size(length, 1, width), Position(x, y+height+self.ceilingthickness / 2, z), 0)\r\n self.walls.append(roof)\r\n roofmodel = Plane(w=length, h=width, name=name+str(wallnum))\r\n mergeshape.add(roofmodel,x,y+height+self.ceilingthickness,z,rx=90.0,ry=0.0,rz=0.0)\r\n\r\n wallnum += 1", "def create_block():\n global BLOCK\n posx = SEG_SIZE * random.randint(1, (WIDTH-SEG_SIZE) / SEG_SIZE)\n posy = SEG_SIZE * random.randint(1, (HEIGHT-SEG_SIZE) / SEG_SIZE)\n BLOCK = c.create_oval(posx, posy,\n posx+SEG_SIZE, posy+SEG_SIZE,\n fill=\"red\")\n # print(posx, posy)\n return posx, posy", "def build_wall(): #py:build_wall\n RUR._build_wall_()", "def north_wall(self, x, y, z, width=10, length=10, height=10, details=None, name=\"wall\", mergeshape=None):\r\n global wallnum\r\n n = z + width / 2\r\n s = z - width / 2\r\n e = x + length / 2\r\n w = x - length / 2\r\n\r\n nwall = SolidObject(name+str(wallnum), Size(length, height, 1), Position(x, y + height / 2, n), 0)\r\n self.walls.append(nwall)\r\n model = Plane(w=nwall.w()*2, h=nwall.h()*2, name=name+str(wallnum))\r\n mergeshape.add(model, nwall.x(), nwall.y(), nwall.z())\r\n\r\n\r\n wallnum += 1", "def _draw_blocks(self):\n\t\tsurface = pygame.display.get_surface()\n\t\tcolors = {\"J\": (15, 105, 245), \"I\": (85, 235, 255), \n\t\t\t\t \"L\":(255, 170, 0), \"S\": (45, 255, 55), \"Z\": (255, 4, 0),\n\t\t\t\t \"O\": (238, 255, 0), \"T\": (245, 0, 255)}\n\t\ty = math.floor((self.window_height - (self.window_height*0.9))/2)\n\t\tx = math.floor((self.window_width - ((self.window_height*0.9)/20)*10)/2)\n\t\tincrement = math.floor((self.window_height*0.9)/20)\n\t\t# loops through board and draws to the correct spot\n\t\tfor i in range(4, 
len(self.gameboard.get_board())):\n\t\t\tfor j in range(len(self.gameboard.get_board()[i])):\n\t\t\t\tx_incremented = math.floor(x + (increment * j))\n\t\t\t\ty_incremented = math.floor(y + (increment * (i-4)))\n\t\t\t\tif self.gameboard.get_board()[i][j][0] in colors:\n\t\t\t\t\tpygame.draw.rect(surface, colors[self.gameboard.get_board()[i][j][0]],\n\t\t\t\t\t\t\t\t\t(x_incremented, y_incremented, increment, increment))\n\t\t\t\t\t\t\t\t\t# x, y, x_wid, y_len\n\t\t\t\telse:\n\t\t\t\t\tpygame.draw.rect(surface, (0,0,0),\n\t\t\t\t\t\t\t\t\t(x_incremented, y_incremented, increment, increment))", "def shapes():\r\n turtle.up()\r\n turtle.forward(500)\r\n turtle.down()\r\n draw_hexagon()\r\n draw_square()\r\n draw_triangle()", "def create_wall_segments(self, points):\n if len(points) < 2:\n return []\n points = map(Vec2d, points)\n for i in range(len(points) - 1):\n v1 = Vec2d(points[i].x, points[i].y)\n v2 = Vec2d(points[i + 1].x, points[i + 1].y)\n wall_body = pymunk.Body()\n wall_shape = pymunk.Segment(wall_body, v1, v2, 3)\n wall_shape.friction = 1.0\n wall_shape.elasticity = 0.95\n #wall_shape.collision_type = COLLTYPE_DEFAULT\n self.space.add(wall_shape)\n self.walls.append(wall_shape)", "def regenerate(self, random_state):\n super(WallsCorridor, self).regenerate(random_state)\n wall_x = variation.evaluate(\n self._wall_gap, random_state=random_state) - _CORRIDOR_X_PADDING\n wall_side = 0\n wall_id = 0\n while wall_x < self._current_corridor_length:\n wall_width = variation.evaluate(\n self._wall_width, random_state=random_state)\n wall_height = variation.evaluate(\n self._wall_height, random_state=random_state)\n wall_rgba = variation.evaluate(self._wall_rgba, random_state=random_state)\n if variation.evaluate(self._swap_wall_side, random_state=random_state):\n wall_side = 1 - wall_side\n\n wall_pos = [\n wall_x,\n (2 * wall_side - 1) * (self._current_corridor_width - wall_width) / 2,\n wall_height / 2\n ]\n wall_size = [_WALL_THICKNESS / 2, wall_width / 2, wall_height / 2]\n self._walls_body.add(\n 'geom',\n type='box',\n name='wall_{}'.format(wall_id),\n pos=wall_pos,\n size=wall_size,\n rgba=wall_rgba)\n\n wall_id += 1\n wall_x += variation.evaluate(self._wall_gap, random_state=random_state)", "def setup_walls(self):\n self.wall_list = self.get_current_map().get_layer_by_name(\"walls\").sprite_list", "def _makeWall(self, x, y):\n\t\tif self._isEmpty(x, y):\n\t\t\tself.setWall(x, y)\n\t\t\tif (x, y) not in self._walls:\n\t\t\t\tself._walls.append((x, y))", "def _draw_block(self, block: Tuple[int, int], kind: str) -> None:\n # ToDo: implement display picture: https://pythonprogramming.net/displaying-images-pygame/\n if self.board_formatting[kind]['picture'] is not None:\n raise Exception('Displaying pictures has not yet been implemented!')\n else:\n rectangle = [block[1] * self.block_length, block[0] * self.block_length,\n self.block_length, self.block_length]\n pygame.draw.rect(self.display, self.board_formatting[kind]['color'], rectangle)", "def draw(self, shape):\n shape.draw(shader=self.shader)", "def divide(graph, draw, top_left, bottom_right):\n\n tl_row, tl_col = top_left\n br_row, br_col = bottom_right\n\n width = abs(tl_col - br_col) + 1\n height = abs(tl_row - br_row) + 1\n\n MINIMUM_SIZE = 4\n\n if width < MINIMUM_SIZE or height < MINIMUM_SIZE:\n return\n\n orientation = choose_orientation(width, height)\n\n time.sleep(SLEEP_SPEEP)\n\n if orientation == VERTICAL:\n wall_col = randint(tl_col + 1, br_col - 1)\n\n # draw walls\n # To compesate for the gap between walls\n for i 
in range(-2, height + 2):\n graph.make_wall((tl_row + i, wall_col))\n\n hole_1_row = randint(tl_row, br_row - 2)\n hole_2_row = hole_1_row + 1\n hole_3_row = hole_1_row + 2\n\n graph.make_empty((hole_1_row, wall_col))\n graph.make_empty((hole_2_row, wall_col))\n graph.make_empty((hole_3_row, wall_col))\n\n draw()\n\n divide(graph, draw, top_left, (br_row, wall_col - 2))\n divide(graph, draw, (tl_row, wall_col + 2), bottom_right)\n\n else:\n wall_row = randint(tl_row + 1, br_row - 1)\n\n # draw walls\n # To compensate for the gap between the walls\n for i in range(-2, width + 2):\n graph.make_wall((wall_row, tl_col + i))\n\n hole_1_col = randint(tl_col, br_col - 2)\n hole_2_col = hole_1_col + 1\n hole_3_col = hole_1_col + 2\n\n graph.make_empty((wall_row, hole_1_col))\n graph.make_empty((wall_row, hole_2_col))\n graph.make_empty((wall_row, hole_3_col))\n\n draw()\n\n divide(graph, draw, top_left, (wall_row - 2, br_col))\n divide(graph, draw, (wall_row + 2, tl_col), bottom_right)", "def pygameMazeDraw (screen, arr, x, y, mobList, walls, monstors, exit, floors, entry):\n xLower = x-11\n yLower = y-11\n xUpper = x+11\n yUpper = y+11\n if xLower <= 0:\n xLower = 0\n xUpper = 21\n if yLower <= 0:\n yLower = 0\n yUpper = 21\n if xUpper >= len(arr):\n xUpper = len(arr)\n xLower = len(arr)-21\n if yUpper >= len(arr[0]):\n yUpper = len(arr[0])\n yLower = len(arr[0])-21\n #X and Y lower and upper are the bounds for the sprites being generated. This generates only the sprites that get displayed, so its more efficient\n for i in range((yLower), (yUpper), 1):\n for j in range((xLower), (xUpper), 1):\n if arr[i][j] == 9:\n florBlock = Flor(i,j)\n florBlock.add(floors)\n elif arr[i][j] == 5:\n wallBlock = Wal(i,j)\n wallBlock.add(walls)\n elif arr[i][j] == 1:\n florBlock = Flor(i,j)\n florBlock.add(floors)\n elif arr[i][j] == 6:\n entryBlock = Entry(i,j)\n entryBlock.add(entry)\n elif arr[i][j] == 7:\n exitBlock = Exit(i,j)\n exitBlock.add(exit)\n\n mobDraw(mobList, monstors)", "def init_blocks(self):\n length = self.physics.len_blocks\n rect = Rectangle(Vector(self.rpos.x, self.rpos.y),\n Vector(self.rpos.x + length, self.rpos.y + length))\n self.rects.append(rect)\n self.physics.add_block(rect, self.stype)", "def draw_foreground(self):\n index = 0\n for tile in self.foreground_data:\n if tile != self.empty_tile:\n x_pos = (index * self.tile_size) % self.w\n y_pos = math.floor((index * self.tile_size) / self.w) * self.tile_size\n b = Block(tile, x_pos, y_pos)\n self.screen.entity_layer_1.add(b)\n index += 1", "def generatePolygons():", "def _draw_mystery_block(self, instance: MysteryBlock, shape: pymunk.Shape,\n view: tk.Canvas, offset: Tuple[int, int]) -> List[int]:\n if instance.is_active(): # if MysteryBlock is active\n image = self.load_image(\"coin\")\n else:\n image = self.load_image(\"coin_used\")\n\n return [view.create_image(shape.bb.center().x + offset[0], shape.bb.center().y,\n image=image, tags=\"block\")]", "def set_channel_walls(self,walls=['left','right','top','bottom']):\n solid_list_a = np.empty(0).flatten()\n solid_list_b = np.empty(0).flatten()\n solid_list_c = np.empty(0).flatten()\n solid_list_d = np.empty(0).flatten()\n\n for w in walls:\n if w=='right':\n solid_list_a = np.array(np.where((self.x==0.))).flatten()\n elif w=='left':\n solid_list_b = np.array(np.where((self.x == self.Lx_p))).flatten()\n elif w=='top':\n solid_list_d = np.array(np.where((self.y == self.Ly_p))).flatten()\n elif w=='bottom':\n solid_list_c = np.array(np.where((self.y == 0.))).flatten()\n\n solid_list = 
np.array(np.union1d(solid_list_a,solid_list_b)); \n solid_list = np.array(np.union1d(solid_list,solid_list_c))\n self.solid_list = np.array(np.union1d(solid_list,solid_list_d))", "def set_channel_walls(self,walls=['left','right','top','bottom']):\n solid_list_a = np.empty(0).flatten()\n solid_list_b = np.empty(0).flatten()\n solid_list_c = np.empty(0).flatten()\n solid_list_d = np.empty(0).flatten()\n\n for w in walls:\n if w=='right':\n solid_list_a = np.array(np.where((self.x==0.))).flatten()\n elif w=='left':\n solid_list_b = np.array(np.where((self.x == self.Lx_p))).flatten()\n elif w=='top':\n solid_list_d = np.array(np.where((self.y == self.Ly_p))).flatten()\n elif w=='bottom':\n solid_list_c = np.array(np.where((self.y == 0.))).flatten()\n\n solid_list = np.array(np.union1d(solid_list_a,solid_list_b)); \n solid_list = np.array(np.union1d(solid_list,solid_list_c))\n self.solid_list = np.array(np.union1d(solid_list,solid_list_d))", "def draw_floor_plan(image, curr_box, label):\n wall_thickness = 2\n wall_symbol = 2.0\n x1, y1, x2, y2 = curr_box[0], curr_box[1], curr_box[2], curr_box[3]\n _, h, w = image.size()\n x1 = int(x1.item() * w)\n y1 = int(y1.item() * h)\n x2 = int(x2.item() * w)\n y2 = int(y2.item() * h)\n image[:, y1:y2, x1:x2] = label/13.0\n image[:, y1-wall_thickness:y1+wall_thickness, x1:x2] = wall_symbol\n image[:, y2-wall_thickness:y2+wall_thickness, x1:x2] = wall_symbol\n image[:, y1:y2, x1-wall_thickness:x1+wall_thickness] = wall_symbol\n image[:, y1:y2, x2-wall_thickness:x2+wall_thickness] = wall_symbol\n return image", "def make_flower(shape, x, y, c1, c2, l, s):\n shape.penup()\n shape.speed(20)\n shape.setpos(x, y)\n shape.color(c2, c1)\n shape.begin_fill()\n shape.pendown()\n for side in range(6):\n shape.left(60)\n shape.forward(s) # s stands for short side\n shape.right(60)\n shape.forward(l) # l stands for long side\n shape.right(60)\n shape.forward(s)\n shape.right(60)\n shape.forward(s)\n shape.right(60)\n shape.forward(l)\n shape.right(60)\n shape.forward(s)\n shape.right(60)\n shape.end_fill()\n shape.pendown()\n\n shape.color(\"green\")\n shape.right(90)\n shape.penup()\n shape.forward(10)\n shape.pendown()\n shape.forward(110)\n shape.left(90)\n\n\n\n # ...", "def create_cycle(number_of_vertices, radius, thickness):\n\n global screen\n dist_apart = number_of_vertices * 15\n\n for i in range(0, number_of_vertices):\n vtx_x = int((WINDOW_WIDTH / 2) + math.cos((i * math.pi * 2)/number_of_vertices) * dist_apart)\n vtx_y = int((WINDOW_HEIGHT / 2) + math.sin((i * math.pi * 2)/number_of_vertices) * dist_apart)\n\n vtx = {\"ID\": i,\n \"x\": vtx_x,\n \"y\": vtx_y,\n \"color\": \"WHITE\",\n \"adjacent\": [],\n }\n\n VERTICES.append(vtx);\n\n # Assign adjacency\n for i in range(0, number_of_vertices):\n if i is not number_of_vertices - 1:\n VERTICES[i][\"adjacent\"].append(VERTICES[i + 1][\"ID\"])\n VERTICES[i + 1][\"adjacent\"].append(VERTICES[i][\"ID\"])\n else:\n VERTICES[i][\"adjacent\"].append(VERTICES[0][\"ID\"])\n VERTICES[0][\"adjacent\"].append(VERTICES[i][\"ID\"])\n\n draw_graph(VERTICES, radius, thickness)", "def draw_building():\n\n gerardo.penup()\n gerardo.backward(135)\n gerardo.pendown()\n gerardo.begin_fill()\n for i in range(2): # this loop draws out the rectangle for the building\n gerardo.forward(200)\n gerardo.right(90)\n gerardo.forward(100)\n gerardo.right(90)\n gerardo.end_fill()\n gerardo.hideturtle()", "def on_render(self):\n\n # set text font for in visualization\n pygame.font.init()\n font = pygame.font.SysFont('Arial', 24)\n\n # 
continue running algorithm until done\n if self.algorithm.isDone is False:\n self.algorithm.execute()\n\n # draw background\n for i in range(0, 3):\n for j in range(0, 3):\n self.screen.blit(self.img_grass, (i * 270, j * 270))\n\n # get water instances to draw and draw them\n allWatersList = []\n allWatersList.extend(self.area.allWatersList)\n\n for water in allWatersList:\n pygame.draw.rect(\n self.screen, (0, 0, 128),\n (water.x * 2,\n water.y * 2,\n water.width * 2,\n water.height * 2)\n )\n pygame.draw.rect(\n self.screen,\n (0, 0, 0),\n (0, self.height-50, self.width, 50)\n )\n\n # get house instances to draw and draw them\n housesToPlace = []\n housesToPlace.extend(self.area.mansionList)\n housesToPlace.extend(self.area.familyHomeList)\n housesToPlace.extend(self.area.bungalowList)\n\n for house in housesToPlace:\n # draw free space\n space = pygame.Surface((house.space * 4 + house.width * 2,\n house.space * 4 + house.height * 2))\n space.set_alpha(64)\n space.fill((180, 0, 0))\n self.screen.blit(space,\n (house.x * 2 - house.space * 2,\n house.y * 2 - house.space * 2))\n\n # draw minimum free space\n space = pygame.Surface((house.minimumSpace * 4 + house.width * 2,\n house.minimumSpace * 4 + house.height * 2))\n space.set_alpha(110)\n space.fill((100, 0, 0))\n self.screen.blit(space,\n (house.x * 2 - house.minimumSpace * 2,\n house.y * 2 - house.minimumSpace * 2))\n\n for house in housesToPlace:\n # draw house, colored based on type\n kind = type(house).__name__\n if kind == \"Mansion\":\n pygame.draw.rect(self.screen,\n (200, 255, 40),\n (house.x * 2,\n house.y * 2,\n house.width * 2,\n house.height * 2))\n elif kind == \"Bungalow\":\n pygame.draw.rect(self.screen,\n (255, 40, 200),\n (house.x * 2,\n house.y * 2,\n house.width * 2,\n house.height * 2))\n elif kind == \"FamilyHome\":\n pygame.draw.rect(self.screen,\n (0, 255, 0),\n (house.x * 2,\n house.y * 2,\n house.width * 2,\n house.height * 2))\n\n # Draw black bar at bottom of screen for extra info\n pygame.draw.rect(self.screen,\n (0, 0, 0),\n (0, self.height - 50, self.width, 50))\n\n # Draw area value and last value increase in infobox\n textSurface = font.render('Score: '\n + str(self.area.price),\n True, (255, 255, 255))\n self.screen.blit(textSurface, (10, self.height-35))\n\n # create distinct color for value decreases\n increaseColor = (255, 255, 255)\n if (self.area.price - self.lastPrice < 1):\n increaseColor = (80, 80, 80)\n textSurface = font.render('Increase: ' +\n str(self.area.price - self.lastPrice),\n True, increaseColor)\n self.screen.blit(textSurface, (330, self.height-35))\n pygame.draw.rect(self.screen,\n (0, 0, 0),\n (640, 0, 400, self.height))\n\n # save area values to draw graph\n if (self.area.price >= self.lastPrice and\n self.algorithm.isDone is False and\n self.showDecrease is False):\n\n self.scores.append(self.area.price)\n self.lastPrice = self.area.price\n\n if self.showDecrease is True and self.algorithm.isDone is False:\n self.scores.append(self.area.price)\n self.lastPrice = self.area.price\n\n # draw graph with area values\n fig = pylab.figure(figsize=[4, 4], # Inches\n dpi=100) # 100 dots per inch\n ax = fig.gca()\n ax.plot(self.scores)\n\n canvas = agg.FigureCanvasAgg(fig)\n canvas.draw()\n renderer = canvas.get_renderer()\n raw_data = renderer.tostring_rgb()\n\n surf = pygame.image.fromstring(raw_data, (400, 400), \"RGB\")\n self.screen.blit(surf, (640, 0))\n matplotlib.pyplot.close(fig)\n\n # Draw all time highest score if that's set\n if self.allTimeHigh is not 0:\n textSurface = 
font.render('Highest score: ' +\n str(self.allTimeHigh),\n True, (255, 255, 255))\n self.screen.blit(textSurface, (650, 410))\n\n pygame.display.flip()\n pass", "def draw(self, screen):\n for branch_points in self.branches:\n pygame.draw.polygon(screen, self.branch_color, branch_points)\n for bottom_points in self.bottom:\n pygame.draw.polygon(screen, self.bottom_color, bottom_points)", "def init_blocks(self):\n length = self.physics.len_blocks\n rect = Rectangle(self.rpos, self.rpos + Vector(length, length))\n self.rects.append(rect)\n self.physics.add_block(rect, 'bomberman')", "def theRoof(pos, blockTypeMain = wool , mainColor=wPurple, replaceGlass = wGlass):\n \n # try again the same trick to add the roof\n # Middle part\n for i in range(0,12,1):\n iy = i\n if i >= 6:\n iy=11-i\n #print i, iy\n mc.setBlocks(pos.x-4+i, pos.y+10+iy, pos.z+4,\n pos.x-4+i, pos.y+10+iy, pos.z+29, blockTypeMain, mainColor)\n\n # RIGHT SIDE of the house\n for ii in range(0,3,1):\n mc.setBlocks(pos.x-5+ii, pos.y+9+ii, pos.z+5+ii,\n pos.x-13+ii, pos.y+9+ii, pos.z+29-ii, blockTypeMain, mainColor)\n #Remove the blocks\n\n material = air\n if ii >=2 :\n material = replaceGlass\n mc.setBlocks(pos.x-5+ii, pos.y+9+ii, pos.z+8,\n pos.x-11+ii, pos.y+9+ii, pos.z+26-ii, material)\n \n # and LEFT side of the house\n xAdjust = 21\n for ii in range(0,3,1):\n mc.setBlocks(pos.x-5-ii+xAdjust, pos.y+9+ii, pos.z+5+ii,\n pos.x-13-ii+xAdjust, pos.y+9+ii, pos.z+29-ii, blockTypeMain, mainColor)\n #Remove the blocks\n\n material = air\n if ii >=2 :\n material = replaceGlass\n mc.setBlocks(pos.x-7-ii+xAdjust, pos.y+9+ii, pos.z+8,\n pos.x-13-ii+xAdjust, pos.y+9+ii, pos.z+26-ii, material)", "def draw_shape(self, r=0, g=0, b=0): # black is the default color\r\n turtles= turtle.Turtle()\r\n turtles.speed(0) # Makes the turtle speed up\r\n turtles.color(r, g, b)\r\n turtles.showturtle()\r\n turtles.penup()\r\n turtles.pendown()\r\n\r\n # draws the Shape to the screen\r\n\r\n for i in range(self.num_sides):\r\n turtles.forward(self.side_length)\r\n turtles.left(360/(self.num_sides))\r\n turtles.hideturtle()", "def setup_walls(self):\n self.wall_list = self.get_current_map().get_layer_by_name(\"Obstacles\").sprite_list", "def _prepare_grid(self):\n draw_grid = list()\n for x in range(len(self._grid) + len(self._grid) + 1):\n if x % 2 == 0:\n draw_grid.append([self._walk_area_color if x % 2 != 0 else self._wall_color\n for x in range(len(self._grid) + len(self._grid) + 1)])\n else:\n draw_grid.append([self._walk_area_color\n for _ in range(len(self._grid) + len(self._grid) + 1)])\n\n draw_grid = self._draw_walls(draw_grid)\n draw_grid = self._draw_treasures(draw_grid)\n draw_grid = self._draw_border(draw_grid)\n return draw_grid", "def create_wall():\n if config.W_LIST == []:\n pos = randint(config.M.x_pos+4, common.R2)\n if common.value_arr(pos, common.MIDS_R) == \" \" and \\\n common.value_arr(pos, common.MIDS_R+1) == \"0\":\n try:\n witem = obstacle.Wall(pos)\n config.W_LIST.append(witem)\n except config.GapHere:\n pass\n\n elif len(config.W_LIST) < int((3*common.COLS)/80):\n if randint(0, 10) == 5:\n # create a obstacle\n pos = config.W_LIST[-1].x_pos + randint(10, 20)\n if pos < common.COLS - 3:\n try:\n witem = obstacle.Wall(pos)\n config.W_LIST.append(witem)\n except config.GapHere:\n pass\n\n else:\n pass", "def regenerate(self, random_state):\n self._walls_body.geom.clear()\n corridor_width = variation.evaluate(self._corridor_width,\n random_state=random_state)\n corridor_length = variation.evaluate(self._corridor_length,\n 
random_state=random_state)\n self._current_corridor_length = corridor_length\n self._current_corridor_width = corridor_width\n\n self._ground_plane.pos = [corridor_length / 2, 0, 0]\n self._ground_plane.size = [\n corridor_length / 2 + _CORRIDOR_X_PADDING, corridor_width / 2, 1]\n\n self._left_plane.pos = [\n corridor_length / 2, corridor_width / 2, _SIDE_WALL_HEIGHT / 2]\n self._left_plane.size = [\n corridor_length / 2 + _CORRIDOR_X_PADDING, _SIDE_WALL_HEIGHT / 2, 1]\n\n self._right_plane.pos = [\n corridor_length / 2, -corridor_width / 2, _SIDE_WALL_HEIGHT / 2]\n self._right_plane.size = [\n corridor_length / 2 + _CORRIDOR_X_PADDING, _SIDE_WALL_HEIGHT / 2, 1]\n\n self._near_plane.pos = [\n -_CORRIDOR_X_PADDING, 0, _SIDE_WALL_HEIGHT / 2]\n self._near_plane.size = [corridor_width / 2, _SIDE_WALL_HEIGHT / 2, 1]\n\n self._far_plane.pos = [\n corridor_length + _CORRIDOR_X_PADDING, 0, _SIDE_WALL_HEIGHT / 2]\n self._far_plane.size = [corridor_width / 2, _SIDE_WALL_HEIGHT / 2, 1]", "def stamp_walls_and_make_lists(n, m, num_walls, square_size):\r\n global all_squares, free_squares, wall_squares, start_x, start_y\r\n # wall turtle stamper setup\r\n wall_stamper = turtle.clone()\r\n wall_stamper.shape(\"square\")\r\n wall_stamper.color(\"gray\")\r\n wall_stamper.hideturtle()\r\n wall_stamper.penup()\r\n # make the random wall coordinates\r\n start_x = - (m * square_size) // 2\r\n start_y = - (n * square_size) // 2\r\n # wall_coords = [(random.randint(0,n-1), random.randint(0,m-1)) for c in range(num_walls)]\r\n wall_coords = set() # to eliminate repeats\r\n while len(wall_coords) < num_walls: # to get exact number of walls\r\n random_pos = (random.randint(0,n-1), random.randint(0,m-1))\r\n wall_coords.add(random_pos)\r\n wall_coords = list(wall_coords) # easier way to deal with the coords\r\n # loop over all coordinates and construct the lists\r\n for r in range(n):\r\n for c in range(m):\r\n this_square = (start_x + (c * square_size), start_y + (r * square_size))\r\n if (r,c) in wall_coords: # if it's a wall\r\n wall_squares.append(this_square)\r\n wall_stamper.goto(this_square)\r\n wall_stamper.stamp()\r\n else: # it's a free square\r\n free_squares.append(this_square)\r\n all_squares.append(this_square)", "def _settle_shape(self, shape):\n if shape:\n for block in shape.blocks:\n self.array[block.row_position][block.column_position] = block\n self.remove_completed_lines()", "def create_hard_blocks(self):\n for x in xrange(1, self.map_size[0], 2):\n for y in xrange(1, self.map_size[1], 2):\n self.create_hard_block_at(x, y)", "def makeTheHouse(pos, blockTypeMain= wool, blockTypeSecond= wool,\n mainColor= wMagenta, secondColor= wWhite,\n myDoor= wDoorWood):\n\n ### FRONT (& BACK )###\n for Front in range(0,22,21): #This is the trick for the back copy...\n \n mc.setBlocks(pos.x-4, pos.y,pos.z+6+Front,\n pos.x+7, pos.y+9, pos.z+6+Front, blockTypeMain, mainColor)\n mc.setBlocks(pos.x-3, pos.y+1,pos.z+6+Front,\n pos.x+6, pos.y+8, pos.z+6+Front, blockTypeSecond, secondColor)\n # FRONT - Remove blocks\n # Small trick to remove the 6 empty space by a loop\n #[[x,y],[x,y],[x,y],...]\n for i in [[-1,+1],[5,+1],[+2,0],[-1,+5],[2,+5],[5,+5]]:\n mc.setBlocks(pos.x+i[0], pos.y+i[1],pos.z+6+Front,\n pos.x+i[0]-1, pos.y+i[1]+2, pos.z+6+Front, air)\n #let's put the Glasses (that's almost the same than remove actually...)\n for i in [[-1,+1],[5,+1],[-1,+5],[2,+5],[5,+5]]:\n mc.setBlocks(pos.x+i[0], pos.y+i[1],pos.z+6+Front,\n pos.x+i[0]-1, pos.y+i[1]+2, pos.z+6+Front, wGlass_Pane)\n # The door at Entrance\n 
mc.setBlock(pos.x+1, pos.y, pos.z+6+Front, myDoor,4)\n mc.setBlock(pos.x+1, pos.y+1, pos.z+6+Front, myDoor,8)\n mc.setBlock(pos.x+2, pos.y, pos.z+6+Front, myDoor,1)\n mc.setBlock(pos.x+2, pos.y+1, pos.z+6+Front, myDoor,8)\n \n # ************\n \n # FRONT - Small top\n mc.setBlocks(pos.x-3, pos.y+10,pos.z+6+Front,\n pos.x+6, pos.y+14, pos.z+6+Front, blockTypeSecond, secondColor)\n mc.setBlocks(pos.x-1, pos.y+10,pos.z+6+Front,\n pos.x+4, pos.y+13, pos.z+6+Front, blockTypeMain, mainColor)\n mc.setBlocks(pos.x, pos.y+10,pos.z+6+Front,\n pos.x+3, pos.y+12, pos.z+6+Front, blockTypeSecond, secondColor)\n # FRONT-Small top Remove Blocks\n mc.setBlocks(pos.x+1, pos.y+11,pos.z+6+Front,\n pos.x+2, pos.y+12, pos.z+6+Front, air)\n # small trick to remove as \"stairs\" - funny ? no ?\n for i in range(0,10,1):\n iy = i\n if i > 5:\n iy=9-i\n #print i, iy\n mc.setBlocks(pos.x-3+i, pos.y+11+iy,pos.z+6+Front,\n pos.x-3+i, pos.y+15, pos.z+6+Front, air)\n # FRONT-Small Top put Glass\n mc.setBlocks(pos.x+1, pos.y+11,pos.z+6+Front,\n pos.x+2, pos.y+12, pos.z+6+Front, wGlass_Pane)\n\n\n # FRONT-Right & Left side \n for i in range(0,19,18):\n #print i\n mc.setBlocks(pos.x-4+i, pos.y,pos.z+7+Front,\n pos.x-11+i, pos.y+8, pos.z+7+Front, blockTypeMain, mainColor)\n mc.setBlocks(pos.x-5+i, pos.y+1,pos.z+7+Front,\n pos.x-10+i, pos.y+7, pos.z+7+Front, blockTypeSecond, secondColor)\n # blocks removal\n mc.setBlocks(pos.x-6+i, pos.y+1,pos.z+7+Front,\n pos.x-9+i, pos.y+7, pos.z+7+Front, wGlass_Pane)\n # the line\n mc.setBlocks(pos.x-5+i, pos.y+4,pos.z+7+Front,\n pos.x-11+i, pos.y+4, pos.z+7+Front, blockTypeMain, mainColor)\n \n #remove 2 extra columns\n mc.setBlocks(pos.x-4, pos.y, pos.z+7,\n pos.x-4, pos.y+8, pos.z+7, air)\n mc.setBlocks(pos.x-4+11, pos.y, pos.z+7,\n pos.x-4+11, pos.y+8, pos.z+7, air)\n\n\n ### MAIN WALLS RIGHT & LEFT SIDE ###\n for wall in range(0,26,25):\n mc.setBlocks(pos.x-11+wall, pos.y, pos.z+8,\n pos.x-11+wall, pos.y+8, pos.z+28, blockTypeMain, mainColor)\n\n mc.setBlocks(pos.x-11+wall, pos.y+1, pos.z+8,\n pos.x-11+wall, pos.y+7, pos.z+27, blockTypeSecond, secondColor)\n\n for i in range(0,15,7):\n mc.setBlocks(pos.x-11+wall, pos.y+1,pos.z+9+i,\n pos.x-11+wall, pos.y+7, pos.z+12+i, wGlass_Pane)\n \n # the 3 lines\n mc.setBlocks(pos.x-11+wall, pos.y, pos.z+14,\n pos.x-11+wall, pos.y+8, pos.z+14, blockTypeMain, mainColor)\n mc.setBlocks(pos.x-11+wall, pos.y, pos.z+21,\n pos.x-11+wall, pos.y+8, pos.z+21, blockTypeMain, mainColor)\n mc.setBlocks(pos.x-11+wall, pos.y+4, pos.z+8,\n pos.x-11+wall, pos.y+4, pos.z+28, blockTypeMain, mainColor)\n\n\n \n\n #same \n #removeBlocks(pos.x-1, pos.y+2, pos.z+6, 2, \n pass", "def __draw(self, display, color, size):\n\t\tif self.walls[0]: # up\n\t\t\tpygame.draw.line(display, color, (self.col * size , self.row * size) , (self.col * size + size, self.row * size))\n\t\tif self.walls[3]: # down\n\t\t\tpygame.draw.line(display, color, (self.col * size + size, self.row * size + size), (self.col * size , self.row * size + size))\n\t\tif self.walls[1]: #left\n\t\t\tpygame.draw.line(display, color, (self.col * size + size, self.row * size) , (self.col * size + size, self.row * size + size))\n\t\tif self.walls[2]: #right\n\t\t\tpygame.draw.line(display, color, (self.col * size , self.row * size + size), (self.col * size , self.row * size))\n\n\t\tif self.current:\n\t\t\tdraw_rect_with_alpha(display, self.CURRENT_COLOR, Vector((self.col, self.row)) * size, (size, size))\n\n\t\telif self.backtracked and self.SHOW_BACKTRACK:\n\t\t\tdraw_rect_with_alpha(display, 
self.BACKTRACKED_COLOR, Vector((self.col, self.row)) * size, (size, size))\n\n\t\telif self.visited:\n\t\t\tdraw_rect_with_alpha(display, self.VISITED_COLOR, Vector((self.col, self.row)) * size, (size, size))", "def create_custom_graph(radius, thickness):\n\n global screen\n generating = True\n\n # Number of vertices created and the two vertices to connect\n vertices_created = 0\n vtx_one = None\n vtx_two = None\n\n while generating:\n\n for event in pygame.event.get():\n\n # Get all mouse click events\n if event.type == pygame.MOUSEBUTTONDOWN:\n\n # Store the click position's coordinates\n mouse_x, mouse_y = pygame.mouse.get_pos()\n\n # Get all keys pressed\n keys = pygame.key.get_pressed()\n\n # Create a vertex when clicking and pressing 'v'\n if keys[pygame.K_v]:\n vtx = {\"ID\": vertices_created,\n \"x\": mouse_x,\n \"y\": mouse_y,\n \"color\": \"WHITE\",\n \"adjacent\": [],\n }\n VERTICES.append(vtx);\n vertices_created += 1\n\n # Set the source vertex to whichever vertex was clicked on\n for vtx in VERTICES:\n if (is_clicked(vtx[\"x\"], vtx[\"y\"], mouse_x, mouse_y, RADIUS)):\n vtx_one = vtx\n \n if event.type == pygame.MOUSEBUTTONUP:\n\n # Store the click position's coordinates\n mouse_x, mouse_y = pygame.mouse.get_pos()\n\n # Set the destination vertex to whichever vertex was under the\n # cursor after the click\n for vtx in VERTICES:\n if (is_clicked(vtx[\"x\"], vtx[\"y\"], mouse_x, mouse_y, RADIUS)):\n vtx_two = vtx\n\n # If the source and destination vertices have values, connect them\n if vtx_one is not None and vtx_two is not None and vtx_one[\"ID\"] is not vtx_two[\"ID\"]:\n vtx_one[\"adjacent\"].append(vtx_two[\"ID\"])\n vtx_two[\"adjacent\"].append(vtx_one[\"ID\"])\n \n\n\n if event.type == pygame.KEYDOWN:\n\n # Reset the graph generation if 'r' is pressed\n if event.key == pygame.K_r:\n vertices_created = 0\n VERTICES.clear()\n vtx_one = None\n vtx_two = None\n screen.fill(BACKGROUND)\n\n # Delete the most recently made vertex and all of its adjacencies\n if event.key == pygame.K_u and vertices_created >= 1:\n vertices_created -= 1\n deleted = VERTICES.pop()\n for adj in deleted[\"adjacent\"]:\n VERTICES[adj][\"adjacent\"].remove(deleted[\"ID\"])\n vtx_one = None\n vtx_two = None\n screen.fill(BACKGROUND)\n \n # Delete the most recently drawn edge\n if event.key == pygame.K_e and vertices_created >= 2:\n if vtx_one[\"adjacent\"] and vtx_two[\"adjacent\"]:\n vtx_one[\"adjacent\"].pop()\n vtx_two[\"adjacent\"].pop()\n screen.fill(BACKGROUND)\n\n\n\n # Close window on pressing ESC\n if event.key == pygame.K_ESCAPE or event.key == pygame.K_c:\n generating = False\n\n # If the window is closed, exit the game\n if event.type == pygame.QUIT:\n generating = False\n \n draw_graph(VERTICES, RADIUS, THICKNESS)\n pygame.display.update()", "def ground(pos, mainColor= wPink, secondColor=wPurple):\n mc.setBlocks(pos.x, pos.y-1,pos.z,pos.x+3, pos.y-1,pos.z, wool, secondColor)\n \n i = 1\n #mc.setBlock(pos.x-1, pos.y-1,pos.z+1,wool, wPink)\n\n while i <= 8:\n mc.setBlock(pos.x-i, pos.y-1,pos.z+i,wool, secondColor)\n\n mc.setBlocks(pos.x-i+1, pos.y-1, pos.z+i,\n pos.x-i+1+2+(2*i), pos.y-1,pos.z+i,\n wool, mainColor)\n\n mc.setBlock(pos.x-i+3+(2*i), pos.y-1,pos.z+i,\n wool, secondColor)\n i += 1\n\n # build the larger area\n mc.setBlocks(pos.x-14, pos.y-1,pos.z+9,pos.x+17, pos.y-1,pos.z+37, wool, secondColor)\n mc.setBlocks(pos.x-13, pos.y-1,pos.z+10,pos.x+16, pos.y-1,pos.z+36, wool, mainColor)\n\n # remove purple line in front\n mc.setBlocks(pos.x-8, pos.y-1,pos.z+9,pos.x+11, 
pos.y-1,pos.z+9, wool, mainColor)\n\n ## End of ground fucntion ##", "def draw_room(screen, grid, start_location):\n wall_image = pygame.image.load(\"images/pillar.png\")\n wall_image_transparent = pygame.image.load(\"images/pillar_80.png\")\n floor_image = pygame.image.load(\"images/floor.png\")\n computer_image = pygame.image.load(\"images/desk_computer.png\")\n\n # map_to_image = [floor_image, # 0\n # wall_image, # 1\n # wall_image_transparent, # 2\n # computer_image] # 3\n map_to_image = {\n \"0\": floor_image,\n \"1\": wall_image,\n \"2\": wall_image_transparent,\n \"3\": computer_image,\n \"10\": wall_image # Secret passage\n }\n # better tile management for multiple environments / create multiple environments.\n # 0 = floor, 1 = wall (pillar)\n # First draw floor everywhere\n max_dimensions = grid.shape\n for r in range(max_dimensions[0]):\n for c in range(max_dimensions[1]):\n screen.blit(floor_image, (c * 30 + start_location[0],\n r * 30 + start_location[1]))\n\n for tile_type in [1, 2, 3, 10]:\n the_rows, the_cols = np.where(grid == tile_type)\n for i in range(len(the_cols)):\n screen.blit(map_to_image[str(tile_type)], (the_cols[i] * 30 + start_location[0],\n the_rows[i] * 30 + start_location[1]))", "def __init__(self, total_length:int, initial_y:int, screen, number:int, debug:bool = False):\n\n #Call the superclass\n super().__init__()\n\n #Create the group of blocks based on x and y and add them to the group\n for k in range(number):\n for i in range(-1,2):\n for j in range(-2,3):\n self.add(Block(total_length * (k+1) // (number+1) + 10*j, initial_y + 10*i, screen, debug = debug))", "def draw_grid(self, surface):\n\n # put platform to the left\n (top, left) = get_surface_pos(self.flower_spawn_pos[0])\n surface.blit(self.platform, ((top-RADIUS, left-RADIUS), (0, 0)))\n\n unit_cell = [(.5 * RADIUS, 0),\n (1.5 * RADIUS, 0),\n (2 * RADIUS, SQRT3 / 2 * RADIUS),\n (1.5 * RADIUS, SQRT3 * RADIUS),\n (.5 * RADIUS, SQRT3 * RADIUS),\n (0, SQRT3 / 2 * RADIUS)]\n\n r = RADIUS*0.75\n unit_cell_inner = [(.5 * r, 0),\n (1.5 * r, 0),\n (2 * r, SQRT3 / 2 * r),\n (1.5 * r, SQRT3 * r),\n (.5 * r, SQRT3 * r),\n (0, SQRT3 / 2 * r)]\n\n # A point list describing a single cell, based on the radius of each hex\n for cell in self.cells:\n row, col = cell\n # Alternate the offset of the cells based on column\n offset = RADIUS * SQRT3 / 2 if col % 2 else 0\n # Calculate the offset of the cell\n top = offset + SQRT3 * row * RADIUS\n left = 1.5 * col * RADIUS\n # Create a point list containing the offset cell\n points = [(x + left, y + top) for (x, y) in unit_cell]\n points_inner = [(RADIUS/4 + x + left, RADIUS/4 + y + top) for (x, y) in unit_cell_inner]\n # Draw the polygon onto the surface\n\n if self.cell_state[cell]:\n pygame.draw.polygon(surface, (255, 204, 0), points, 0)\n pygame.draw.polygon(surface, (255, 255, 0), points_inner, 0)\n else:\n pygame.draw.polygon(surface, (125, 125, 0), points, 0)\n\n pygame.draw.polygon(surface, (0,0,0), points, 2)", "def draw_shape(self, shape):\n for row in range(len(shape.squares)):\n for col in range(len(shape.squares[0])):\n if shape.squares[row][col]:\n self.draw_square(shape.x + col, shape.y + row, shape.color)", "def _create_drawing_area(self):\n\n self.drawing_x = -self.size/2 + self.margin\n self.drawing_y = self.size/2 - self.margin\n self.drawing_width = self.size - self.margin * 2\n self.drawing_height = (self.size/2 + self.flat_fragment) - self.margin * 2\n \n self.drawing_x_step = self.drawing_width \n self.drawing_y_step = self.drawing_height", "def 
create_room(self):\n # iterate through array of room types\n rooms = []\n prob_block_5_list = []\n prob_block_6_list = []\n\n for row in self.room_type:\n for col in row:\n rooms.append(self.import_template(col))\n # iterate through rooms to fill screen\n # this number will be part of how we find location of top left corner of room\n # based on 5x5 grid of rooms\n for pos in range(25):\n # this will iterate through the number of columns in array\n # the number y will be part of how we find where to place the block on the y axis (according to pygame.draw)\n for y in range(self.blocks_per_room_y):\n # this will iterate through the number of rows in array\n # the number x will be part of how we find where to place the block on the x axis (according to pygame.draw)\n for x in range(self.blocks_per_room_x):\n # if cell is a 1 add a platform sprite\n if rooms[pos][y][x] is 1:\n #check if platform has another above it for graphics\n if rooms[pos][y - 1][x] in (0, 3, 4, 7) and y - 1 >= 0:\n # the cases checked in each of these conditionals are the basic case that check surrounding blocks\n # to see what platform we should be using, the edge cases, such as if a block is at the edge of\n # the room, in which case we need to check the neighboring room (array in this case)\n\n #check conditions to see if we are using the sprite with with rounded edges on the bottom right and top right\n if ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 0 and rooms[pos][y][x + 1] is 0 and rooms[pos][y][x - 1] is 1)\\\n or (x is self.blocks_per_room_x - 1 and y < self.blocks_per_room_y - 1 and pos < 24 and rooms[pos][y + 1][x] is 0 and rooms[pos + 1][y][0] is 0)\\\n or (y is self.blocks_per_room_y - 1 and x < self.blocks_per_room_x - 1 and pos < 20 and rooms[pos][y][x + 1] is 0):\n block = Platform(self.block_width, self.block_height, 'right', self.theme)\n #check conditionals to see if we are using the sprite with rounded edges on the bottom left and top left\n elif ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 0 and rooms[pos][y][x - 1] is 0 and rooms[pos][y][x + 1] is 1)\\\n or (x is 0 and y < self.blocks_per_room_y - 1 and pos > 0 and rooms[pos][y + 1][x] is 0 and rooms[pos - 1][y][self.blocks_per_room_x - 1] is 0) \\\n or (y is self.blocks_per_room_y - 1 and x > 0 and pos < 20 and rooms[pos][y][x - 1] is 0):\n block = Platform(self.block_width, self.block_height, 'left', self.theme)\n #check conditionals to see if we are using the sprite with the rounded corners on top left and top right\n elif ((x + 1) < self.blocks_per_room_x and (x - 1) >= 0 and rooms[pos][y][x + 1] in (0, 3, 4) and rooms[pos][y][x - 1] in (0, 3, 4))\\\n or (x is 0 and pos > 0 and rooms[pos - 1][y][self.blocks_per_room_x - 1] in (0, 3, 4) and rooms[pos][y][x + 1] in (0, 3, 4))\\\n or (x is self.blocks_per_room_x - 1 and pos < 24 and rooms[pos + 1][y][0] in (0, 3, 4) and rooms[pos][y][x - 1] in (0, 3, 4)):\n block = Platform(self.block_width, self.block_height, 'round top', self.theme)\n #check conditionals to see if we are using the sprite with the rounded corner in the top left\n elif ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 1 and rooms[pos][y][x - 1] is 0 and rooms[pos][y][x + 1] is 1) \\\n or (x is 0 and y < self.blocks_per_room_y - 1 and pos > 0 and rooms[pos][y + 1][x] is 1 and rooms[pos - 1][y][self.blocks_per_room_x - 1] 
is 0) \\\n or (y is self.blocks_per_room_y - 1 and x > 0 and pos < 20 and rooms[pos][y][x - 1] is 0):\n block = Platform(self.block_width, self.block_height, 'top left', self.theme)\n #check conditionals to see if we are using the sprite with the rounded corner in the top right\n elif ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 1 and rooms[pos][y][x + 1] is 0 and rooms[pos][y][x - 1] is 1)\\\n or (x is self.blocks_per_room_x - 1 and y < self.blocks_per_room_y - 1 and pos < 24 and rooms[pos][y + 1][x] is 0 and rooms[pos + 1][y][0] is 0)\\\n or (y is self.blocks_per_room_y - 1 and x < self.blocks_per_room_x - 1 and pos < 20 and rooms[pos][y][x + 1] is 0):\n block = Platform(self.block_width, self.block_height, 'top right', self.theme)\n else:\n block = Platform(self.block_width, self.block_height, 'top', self.theme)\n else:\n block = Platform(self.block_width, self.block_height, 'middle', self.theme)\n coord_x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n block.rect.x = coord_x\n block.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n block.player = self.player\n self.platform_list.add(block)\n #if the space above this block is empty see if we spawn an enemy on the spot above current block\n if rooms[pos][y-1][x] is 0 and y - 1 >= 0:\n self.enemy_generation(coord_x, self.block_height + (pos // 5) * self.room_side_length_y + (y - 1) * self.block_height)\n # if the cell is a 3 then it will be an item pickup\n elif rooms[pos][y][x] is 3:\n rand = random.randrange(0, 4)\n if rand == 0:\n #calculate coordinates of the bag\n bag = pickupSprite('rope')\n # print('width = ' + str(self.block_width) + ' height = ' + str(self.block_height))\n bag.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n bag.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n bag.player = self.player\n self.bagGroup.add(bag)\n elif rand == 1:\n #calculate coordinates of the bag\n bag = pickupSprite('knife')\n # print('width = ' + str(self.block_width) + ' height = ' + str(self.block_height))\n bag.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n bag.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n bag.player = self.player\n self.bagGroup.add(bag)\n elif rand == 2:\n bag = pickupSprite('health')\n bag.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n bag.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n bag.player = self.player\n self.bagGroup.add(bag)\n\n\n # if the cell is a 4 then it will be either a spike, if the space is on the bottom of the room,\n # otherwise it is a randomized block or nothing\n elif rooms[pos][y][x] is 4:\n # if the cell is at the bottom of the level, randomly choose whether to place a spike or not\n rand = random.randrange(0, 3)\n rand2 = random.randrange(0, 2)\n if y is 6 and rand is 1:\n spike = enemies.Spikes()\n spike.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n spike.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n spike.player = self.player\n self.enemy_list.add(spike)\n # elif y is 6 and rand is 2:\n # dart = enemies.Darts(self.theme, 'up')\n # dart.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * 
self.block_width\n # dart.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n # dart.player = self.player\n # self.enemy_list.add(dart)\n elif y != 6 and rand2 is 0:\n if rooms[pos][y - 1][x] is 0:\n block = Platform(self.block_width, self.block_height, 'top', self.theme)\n else:\n block = Platform(self.block_width, self.block_height, 'middle', self.theme)\n block.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n block.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n block.player = self.player\n self.platform_list.add(block)\n elif y != 6 and rand2 is 1:\n if x-1 >= 0 and x+1 <= self.blocks_per_room_x and y-1 >= 0 and y+1 < self.blocks_per_room_y:\n if rooms[pos][y][x-1] is 0:\n direction = 'left'\n blockType = 'middle'\n elif rooms[pos][y][x+1] is 0:\n direction = 'right'\n blockType = 'middle'\n elif rooms[pos][y-1][x] is 0:\n direction = 'up'\n blockType = 'top'\n elif rooms[pos][y+1][x] is 0:\n direction = 'down'\n blockType = 'middle'\n else:\n direction = None\n if direction is not None:\n # use for both block and dart\n rectX = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n rectY = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n\n block = Platform(self.block_width, self.block_height, blockType, self.theme)\n block.rect.x = rectX\n block.rect.y = rectY\n block.player = self.player\n self.platform_list.add(block)\n\n dart = enemies.Darts(self.theme, direction)\n dart.rect.x = rectX\n dart.rect.y = rectY\n dart.player = self.player\n self.enemy_list.add(dart)\n # this is the starting and ending points of the level\n elif rooms[pos][y][x] is 7:\n # exit of the game on the top row of the level\n if pos // 5 is 0:\n #calculate coordinates of the exit\n self.exit_coords['x'] = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n self.exit_coords['y'] = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n exit = exit_door_sprite(self.block_width, self.block_height)\n # print('width = ' + str(self.block_width) + ' height = ' + str(self.block_height))\n exit.rect.x = self.exit_coords['x']\n exit.rect.y = self.exit_coords['y']\n exit.player = self.player\n self.exit_sprite.add(exit)\n #entance of the game on the bottom row of the level\n elif pos // 5 is 4:\n #calculate coordinates of the entrance\n self.entrance_coords['x'] = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n self.entrance_coords['y'] = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height", "def draw_graphic(self):\r\n\r\n t = Turtle()\r\n text = Turtle()\r\n s = t.getscreen()\r\n s.bgcolor(\"orange\")\r\n count = 0\r\n while count < 1:\r\n text.penup()\r\n text.setposition(-100, -100)\r\n text.pencolor(\"purple\")\r\n text.write(\"{}, area: {:.2f}, perimeter: {:.2f}\".format(self.name, self.area(), self.perimeter()), align=\"left\",\r\n font=(\"Arial\", 20, \"bold\"))\r\n t.goto(0, 0)\r\n t.pen(pencolor=\"purple\", fillcolor=\"green\", pensize=6, speed=20)\r\n t.fillcolor(\"red\")\r\n t.begin_fill()\r\n t.pendown()\r\n t.circle(self.__radius)\r\n t.end_fill()\r\n delay(30)\r\n t.clear()\r\n t.reset()\r\n text.clear()\r\n text.reset()\r\n count += 1", "def east_wall(self, x, y, z, width=10, length=10, height=10, details=None, name=\"wall\", mergeshape=None):\r\n global wallnum\r\n n = z + width / 2\r\n s = z - width / 2\r\n e = 
x + length / 2\r\n w = x - length / 2\r\n\r\n ewall = SolidObject(name+str(wallnum), Size(1, height, width), Position(e, y+height / 2, z), 0)\r\n self.walls.append(ewall)\r\n model = Plane(w=ewall.d()*2, h=ewall.h()*2, name=name+str(wallnum))\r\n mergeshape.add(model, ewall.x(),ewall.y(),ewall.z(), rx=0.0,ry=90.0,rz=0.0)\r\n\r\n wallnum += 1", "def make_floor(self):\n\n for y in range(0, self.num_tiles[1] + 1):\n for x in range(0, self.num_tiles[0] + 1):\n offset = (x * self.tile.size[0], y * self.tile.size[1])\n self.image.blit(self.tile.image, offset)", "def draw_block(self):\n draw_component = DrawComponent(self.component_spot,self.component_type)\n return draw_component", "def begin_draw(self):\n pygame.init()\n self.display = pygame.display.set_mode(self.disp_size)\n pygame.display.set_caption('Map Editing')\n font = pygame.font.SysFont(\"arial\", 15)\n strings = [\"Press ESC to Start Drawing Obstacles\",\n \"Click Left to Draw & Right to Erase\",\n \"To finish Drawing,press Escape \",\n \"During search, Escape or Close to Quit\",\n \"you can also draw during the search, but it won't ba saved\"]\n texts = [font.render(s, True, (255, 255, 255)) for s in strings]\n for i, text in enumerate(texts):\n self.display.blit(text, (self.disp_size[0]//20, i*20+self.disp_size[1]//20))\n pygame.display.update()\n main_screen = True\n while main_screen:\n print(\"Waiting for start\")\n event = pygame.event.wait()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n main_screen = False\n self.display.fill([255, 255, 255])\n grid.draw(self.display)\n pygame.display.update()\n print(\"Now painting\")\n while True:\n event = pygame.event.wait()\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n break\n pos = list((np.array(pygame.mouse.get_pos())/self.block_size).astype(int))\n if pygame.mouse.get_pressed() == (1, 0, 0):\n print(\"Add wall at\", pos)\n grid[pos].type = \"WALL\"\n grid[pos].draw(self.display, self.block_size)\n elif pygame.mouse.get_pressed() == (0, 0, 1):\n print(\"remove wall from\", pos)\n grid[pos].type = \"ROAD\"\n grid[pos].draw(self.display, self.block_size)\n pygame.display.update()", "def __init__(self, shape_num):\n self.shape_num = shape_num\n if shape_num == 1:\n self.width = 4\n self.height = 4\n self.grid = [[0 for x in range(self.height)] for y in range(self.width)]\n self.grid[0][2] = 1\n self.grid[1][2] = 1\n self.grid[2][2] = 1\n self.grid[3][2] = 1\n self.color = Color.SilverPink\n elif shape_num == 2:\n self.width = 3\n self.height = 3\n self.grid = [[0 for x in range(self.height)] for y in range(self.width)]\n self.grid[0][1] = 1\n self.grid[0][2] = 1\n self.grid[1][2] = 1\n self.grid[2][2] = 1\n self.color = Color.TuftsBlue\n elif shape_num == 3:\n self.width = 3\n self.height = 3\n self.grid = [[0 for x in range(self.height)] for y in range(self.width)]\n self.grid[2][1] = 1\n self.grid[0][2] = 1\n self.grid[1][2] = 1\n self.grid[2][2] = 1\n self.color = Color.ChromeYellow\n elif shape_num == 4:\n self.width = 2\n self.height = 2\n self.grid = [[0 for x in range(self.height)] for y in range(self.width)]\n self.grid[0][0] = 1\n self.grid[0][1] = 1\n self.grid[1][0] = 1\n self.grid[1][1] = 1\n self.color = Color.Independence\n elif shape_num == 5:\n self.width = 3\n self.height = 3\n self.grid = [[0 for x in range(self.height)] for y in range(self.width)]\n self.grid[1][0] = 1\n self.grid[2][0] = 1\n self.grid[0][1] = 1\n self.grid[1][1] = 1\n self.color = 
Color.ForestGreen\n elif shape_num == 6:\n self.width = 3\n self.height = 3\n self.grid = [[0 for x in range(self.height)] for y in range(self.width)]\n self.grid[1][1] = 1\n self.grid[0][2] = 1\n self.grid[1][2] = 1\n self.grid[2][2] = 1\n self.color = Color.Byzantine\n elif shape_num == 7:\n self.width = 3\n self.height = 3\n self.grid = [[0 for x in range(self.height)] for y in range(self.width)]\n self.grid[0][0] = 1\n self.grid[1][0] = 1\n self.grid[1][1] = 1\n self.grid[2][1] = 1\n self.color = Color.Coquelicot\n self.top_space = self.get_top_space()\n self.bottom_space = self.get_bottom_space()\n self.x = int((12 - self.width) / 2)\n self.y = 1 - self.top_space\n self.last_drop_time = perf_counter()", "def create_and_add_horiontal_walls_to_list(row_start: int, row_end: int, y: int, wall_list: arcade.SpriteList) -> None:\n #loop creation of wall sprites\n for x in range(row_start * wall_size, row_end * wall_size, wall_size):\n wall = arcade.Sprite(\":resources:images/tiles/boxCrate_double.png\", wall_scaling)\n wall.left = x\n wall.bottom = y * wall_size\n wall_list.append(wall)", "def makeRoom(cls, size, center, environment, wallThickness = 0.01):\n wallList = []\n sx,sy,sz = size\n cx, cy, cz = center\n t = wallThickness#*environment.lengthScale\n\n # top (y+)\n w = cls((sx, t, sz), (cx, cy+sy/2+t/2, cz), environment)\n wallList.append(w)\n\n #bottom (y-)\n w = cls((sx, t, sz), (cx, cy-sy/2-t/2, cz), environment)\n wallList.append(w)\n\n #right (x+)\n w = cls((t, sy, sz), (cx+sx/2+t/2, cy, cz), environment)\n wallList.append(w)\n\n #left (x-)\n w = cls((t, sy, sz), (cx-sx/2-t/2, cy, cz), environment)\n wallList.append(w)\n\n #front (z+)\n w = cls((sx, sy, t), (cx, cy, cz+sz/2+t/2), environment)\n wallList.append(w)\n\n #back (z-)\n w = cls((sx, sy, t), (cx, cy, cz-sz/2-t/2), environment)\n wallList.append(w)\n\n return wallList", "def blocks(self):\n step_phi = (2 * pi - 0) / (2 * self.meridians)\n phi_delta = (2 * pi - 0) / self.meridians\n theta_delta = (self.spring - self.oculus) / self.hoops\n\n phi = []\n for i in range(self.meridians + 1):\n phi.append(0 + i * phi_delta)\n\n theta = []\n for i in range(self.hoops + 1):\n theta.append(self.oculus + i * theta_delta)\n\n blocks = []\n\n for j in range(self.meridians):\n for i in range(self.hoops):\n if i % 2 == 0:\n step = 0\n else:\n step = step_phi\n\n vertices = []\n\n r = radius(self.r_i, self.r_f, self.spring, self.oculus, theta[i])\n R = radius(self.R_i, self.R_f, self.spring, self.oculus, theta[i])\n\n vertices.append(geom_dome(r, theta[i], phi[j] + step))\n vertices.append(geom_dome(R, theta[i], phi[j] + step))\n vertices.append(geom_dome(R, theta[i], phi[j + 1] + step))\n vertices.append(geom_dome(r, theta[i], phi[j + 1] + step))\n\n r = radius(self.r_i, self.r_f, self.spring, self.oculus, theta[i + 1])\n R = radius(self.R_i, self.R_f, self.spring, self.oculus, theta[i + 1])\n\n vertices.append(geom_dome(r, theta[i + 1], phi[j] + step))\n vertices.append(geom_dome(R, theta[i + 1], phi[j] + step))\n vertices.append(geom_dome(R, theta[i + 1], phi[j + 1] + step))\n vertices.append(geom_dome(r, theta[i + 1], phi[j + 1] + step))\n\n faces = [\n [0, 4, 5, 1],\n [1, 5, 6, 2],\n [0, 1, 2, 3],\n [0, 3, 7, 4],\n [5, 4, 7, 6],\n [6, 7, 3, 2],\n ]\n\n block = Mesh.from_vertices_and_faces(vertices, faces)\n blocks.append(block)\n\n return blocks", "def draw_rhombus(self, screen):\n pygame.gfxdraw.filled_polygon(screen, self.list_of_coordinates, self.color)\n\n return screen", "def render(self, window):\n body = 
pygame.image.load(IMAGE_SNAKE).convert_alpha() # loading image\n for block in self.body:\n window.blit(body, (block[0]*SPRITE_SIZE, block[1]*SPRITE_SIZE)) # painting a beautiful snek\n if self.neural_net: # calls for neural net rendering\n self.neural_net.render(window, self.vision)", "def west_edge(self, x, y, z, width=10, length=10, height=10, details=None, name=\"wall\", mergeshape=None):\r\n global wallnum\r\n n = z + width / 2\r\n s = z - width / 2\r\n e = x + length / 2\r\n w = x - length / 2\r\n\r\n model = Plane(w=width, h=self.ceilingthickness, name=name+str(wallnum))\r\n mergeshape.add(model, w,y+height+self.ceilingthickness / 2,z,rx=0.0,ry=90.0,rz=0.0)\r\n\r\n wallnum += 1", "def draw_block(position, color):\n x = position.col*DX+DX+2\n y = position.row*DY+DY+2\n width = DX-4\n height = DY-4\n pygame.draw.rect(screen, color, (x,y,width,height), 0)", "def regular_polygon(sides, radius, height):\n global _cmds\n _cmds = \"}\\n\\n\" + _cmds\n for wedge in range(sides):\n p1 = _cart(radius, wedge*360/sides)\n p2 = _cart(radius, (wedge+1)*360/sides)\n triangle([0, 0], p1, p2, height)\n _cmds = \"union(){\\n\" + _cmds", "def draw_on(self, surface):\n for x, y in self.alive_cells():\n #size = (self.box_size, self.box_size)\n #position = (x * self.box_size, y * self.box_size)\n #thickness = 1\n pygame.draw.rect(surface, DARK_RED, (x * self.box_size, y * self.box_size,self.box_size, self.box_size ))", "def render(self):\n\n theta = self.angle*math.pi/180.0\n cth = math.cos(theta)\n sth = math.sin(theta)\n pts = []\n cornerpts = []\n\n for vertex in self.points:\n x = vertex[0] + self.pos[0] - self.anchor[0]\n y = vertex[1] + self.pos[1] - self.anchor[1]\n\n xt = x * cth - y * sth\n yt = x * sth + y * cth\n\n x = xt + self.anchor[0]\n y = yt + self.anchor[1]\n\n cornerpts.append([x,y])\n pts.append(gr.Point(self.scale * x, self.win.getHeight() - self.scale*y))\n\n self.corners = cornerpts\n self.vis = [gr.Polygon(pts)]", "def draw(self):\n if self.state == 'alive':\n for i in range(len(self.tail)):\n pygame.draw.rect(display, black, (squareToXPix(self.tail[-(i + 1)][0], objectSize), squareToYPix(self.tail[-(i + 1)][1], objectSize), objectSize, objectSize))\n\n pygame.draw.rect(display, black, (squareToXPix(self.x, objectSize), squareToYPix(self.y, objectSize), objectSize, objectSize))\n\n else:\n for i in range(len(self.tail)):\n pygame.draw.rect(display, red, (squareToXPix(self.tail[-(i + 1)][0], objectSize), squareToYPix(self.tail[-(i + 1)][1], objectSize), objectSize, objectSize))\n\n pygame.draw.rect(display, red, (squareToXPix(self.x, objectSize), squareToYPix(self.y, objectSize), objectSize, objectSize))", "def drawShapes(self):\n self.draw_polygon(self.poly3.get_points() , color = \"#000\")\n self.draw_polygon(self.poly2.get_points() , color = \"#000\")\n self.draw_polygon(self.poly1.get_points() , color = \"#000\")\n self.draw_rect(0, 0, self.width, self.height, color= \"#000\")\n \"\"\"These statements are used to determine if a point is inside any of the\n 3 polygons and if so changes the point's color\"\"\"\n if (self.poly2.point_inside_polygon(self.p1) or self.poly1.point_inside_polygon(self.p1)\n or self.poly3.point_inside_polygon(self.p1)):\n color = \"#0F0\"\n else:\n color = \"#F00\"\n self.fill_oval(self.p1.x, self.p1.y, 7, 7, color)\n\n if (self.poly2.point_inside_polygon(self.p2) or self.poly1.point_inside_polygon(self.p2)\n or self.poly3.point_inside_polygon(self.p2)):\n color = \"#0F0\"\n else:\n color = \"#F00\"\n self.fill_oval(self.p2.x, self.p2.y, 7, 7, color)\n 
if (self.poly2.point_inside_polygon(self.p3) or self.poly1.point_inside_polygon(self.p3)\n or self.poly3.point_inside_polygon(self.p3)):\n color = \"#0F0\"\n else:\n color = \"#F00\"\n self.fill_oval(self.p3.x, self.p3.y, 7, 7, color)", "def draw_world_state(self):\n self.fig.clf()\n t = time.time()\n subplot = self.fig.add_subplot(2,1,1)\n for w in self.world_model.walls:\n subplot.plot([w,w],[0,1],'b-')\n subplot.hold(True)\n subplot.set_xlim([min(self.walls)-0.2,max(self.walls)+.2])\n subplot.set_ylim([0,1])\n subplot.scatter([p.position for p in self.pf.particles],\n [0.5]*len(self.pf.particles),\n c='r',\n s=[p.weight**0.5*1000 for p in self.pf.particles])\n\n subplot.scatter([p.position for p in self.pf.particles],\n [0.2]*len(self.pf.particles),\n c='k',\n s=[10]*len(self.pf.particles))\n\n if self.true_position != None:\n subplot.scatter([self.true_position], [0.8], c='g', s=[100])\n\n histogram = self.fig.add_subplot(2,1,2)\n\n histogram.hist([p.position for p in self.pf.particles],\n weights=[p.weight for p in self.pf.particles],\n bins=np.arange(-0.5+min(self.walls),0.5+max(self.walls),.02))\n\n histogram.set_xlim([-.2+min(self.walls),0.2+max(self.walls)])\n histogram.set_ylim([0,1])\n plt.draw()\n plt.pause(.01)", "def draw_graph(vertices, radius, thickness):\n\n global screen\n for vtx in vertices:\n for neighbor in vtx[\"adjacent\"]:\n pygame.draw.line(screen, CRAYONBOX[\"BLACK\"], (vtx[\"x\"], vtx[\"y\"]),\n (vertices[neighbor][\"x\"], vertices[neighbor][\"y\"]), thickness)\n\n for vtx in vertices:\n pygame.draw.circle(screen, CRAYONBOX[\"BLACK\"], (vtx[\"x\"], vtx[\"y\"]), radius, thickness)\n pygame.draw.circle(screen, CRAYONBOX[\"WHITE\"], (vtx[\"x\"], vtx[\"y\"]), radius - thickness)", "def render(self, scene):\n if self.degenerate:\n return\n # The number of subdivisions around the hoop's radial direction.\n if self.thickness:\n band_coverage = scene.pixel_coverage(self.pos, self.thickness)\n else:\n band_coverage = scene.pixel_coverage(self.pos, self.radius * 0.1)\n if band_coverage < 0:\n band_coverage = 1000\n bands = sqrt(band_coverage * 4.0)\n bands = clamp(4, bands, 40)\n # The number of subdivisions around the hoop's tangential direction.\n ring_coverage = scene.pixel_coverage(self.pos, self.radius)\n if ring_coverage < 0:\n ring_coverage = 1000\n rings = sqrt(ring_coverage * 4.0)\n rings = clamp(4, rings, 80)\n slices = int(rings)\n inner_slices = int(bands)\n radius = self.radius\n inner_radius = self.thickness\n\n # Create the vertex and normal arrays.\n vertices = []\n normals = []\n\n outer_angle_step = 2 * pi / (slices - 1)\n inner_angle_step = 2 * pi / (inner_slices - 1)\n outer_angle = 0.\n for i in range(slices):\n cos_outer_angle = cos(outer_angle)\n sin_outer_angle = sin(outer_angle)\n inner_angle = 0.\n for j in range(inner_slices):\n cos_inner_angle = cos(inner_angle)\n sin_inner_angle = sin(inner_angle)\n\n diameter = (radius + inner_radius * cos_inner_angle)\n vertex_x = diameter * cos_outer_angle\n vertex_y = diameter * sin_outer_angle\n vertex_z = inner_radius * sin_inner_angle\n\n normal_x = cos_outer_angle * cos_inner_angle\n normal_y = sin_outer_angle * cos_inner_angle\n normal_z = sin_inner_angle\n\n vertices.extend([vertex_x, vertex_y, vertex_z])\n normals.extend([normal_x, normal_y, normal_z])\n inner_angle += inner_angle_step\n outer_angle += outer_angle_step\n\n # Create ctypes arrays of the lists\n vertices = (gl.GLfloat *len(vertices))(*vertices)\n normals = (gl.GLfloat * len(normals))(*normals)\n\n # Create a list of triangle 
indices.\n indices = []\n for i in range(slices - 1):\n for j in range(inner_slices - 1):\n pos = i * inner_slices + j\n indices.extend([pos, pos + inner_slices, pos + inner_slices +\n 1])\n indices.extend([pos, pos + inner_slices + 1, pos + 1])\n indices = (gl.GLuint * len(indices))(*indices)\n\n # Compile a display list\n self.list = gl.glGenLists(1)\n gl.glNewList(self.list, gl.GL_COMPILE)\n self.color.gl_set(self.opacity)\n\n gl.glPushClientAttrib(gl.GL_CLIENT_VERTEX_ARRAY_BIT)\n gl.glEnableClientState(gl.GL_VERTEX_ARRAY)\n gl.glEnableClientState(gl.GL_NORMAL_ARRAY)\n self.model_world_transform(scene.gcf,\n Vector([self.radius, self.radius,\n self.radius])).gl_mult()\n\n gl.glVertexPointer(3, gl.GL_FLOAT, 0, vertices)\n gl.glNormalPointer(gl.GL_FLOAT, 0, normals)\n gl.glDrawElements(gl.GL_TRIANGLES, len(indices), gl.GL_UNSIGNED_INT,\n indices)\n gl.glPopClientAttrib()\n\n gl.glEndList()\n gl.glCallList(self.list)", "def _draw_bounce_block(self, instance: BounceBlock, shape: pymunk.Shape,\n view: tk.Canvas, offset: Tuple[int, int]) -> List[int]:\n # 'default' is a status where BounceBlock has not been triggered\n if instance.image() != 'default': # if triggered this block\n # the animation rate and image order is handled in BounceBlock\n image = self.load_image(\"bounce_used_\" + instance.image()) # changed image for animation\n return [view.create_image(shape.bb.center().x + offset[0], shape.bb.center().y,\n image=image, tags=\"block\")]\n else:\n image = self.load_image(\"bounce_block\") # it's 'default'\n return [view.create_image(shape.bb.center().x + offset[0], shape.bb.center().y,\n image=image, tags=\"block\")]", "def main():\n run_test_draw_upside_down_wall()", "def south_edge(self, x, y, z, width=10, length=10, height=10, details=None, name=\"wall\", mergeshape=None):\r\n global wallnum\r\n n = z + width / 2\r\n s = z - width / 2\r\n e = x + length / 2\r\n w = x - length / 2\r\n\r\n model = Plane(w=length, h=self.ceilingthickness, name=name+str(wallnum))\r\n mergeshape.add(model, x,y+height+self.ceilingthickness / 2,s, rx=0.0,ry=0.0,rz=0.0)\r\n\r\n wallnum += 1", "def make_circle_fill():\n num_points = 40\n batch = pyglet.graphics.Batch()\n rad = math.pi * 2 / num_points # getting 360 / n in radians\n index = list(itertools.chain.from_iterable( (0, x-1, x) for x in range(2, num_points+1) ))\n index += [0, 1, num_points] # end of fan\n vertices = [0, 0] # adding center of fan\n for i in range(1, num_points + 1):\n angle = rad * i\n vertices += [math.cos(angle), math.sin(angle)]\n vertices += [1, 0] # adding end of fan\n circle = pyglet.graphics.vertex_list_indexed(num_points+2, index, ('v2f', vertices))\n return circle", "def draw_grid(self, darken=1):\n if not(0 < darken < 1):\n darken = 1\n for x in range(0, int(self.settings['grid_size'])):\n for y in range(0, int(self.settings['grid_size'])):\n if self.grid[x][y] == g.EMPTY:\n if (x + y) % 2 == 0:\n r = pygame.Rect((x * self.block_width, y * self.block_height),\n (self.block_width, self.block_width))\n pygame.draw.rect(self.surface, (93 * darken, 216 * darken, 228 * darken), r)\n else:\n rr = pygame.Rect((x * self.block_width, y * self.block_width),\n (self.block_width, self.block_width))\n pygame.draw.rect(self.surface, (84 * darken, 194 * darken, 205 * darken), rr)\n elif self.grid[x][y] == g.WALL:\n rr = pygame.Rect((x * self.block_width, y * self.block_width), (self.block_width, self.block_width))\n pygame.draw.rect(self.surface, (175 * darken, 34 * darken, 6 * darken), rr)\n elif self.grid[x][y] == g.PLAYER:\n r = 
pygame.Rect((x * self.block_width, y * self.block_height),\n (self.block_width, self.block_height))\n pygame.draw.rect(self.surface, (17 * darken, 24 * darken, 47 * darken), r)\n pygame.draw.rect(self.surface, (93, 216, 228), r, 1)\n elif self.grid[x][y] == g.FOOD:\n r = pygame.Rect((x * self.block_width, y * self.block_height),\n (self.block_width, self.block_height))\n pygame.draw.rect(self.surface, (223 * darken, 163 * darken, 49 * darken), r)\n pygame.draw.rect(self.surface, (93, 216, 228), r, 1)", "def draw_layers(self):\n\t\tfor z in xrange(0,16):\n\t\t\t#create surface for this layer\n\t\t\tsrf = pygame.Surface((16,128))\n\t\t\tfor x in xrange(0,16):\n\t\t\t\tfor y in xrange(0,128):\n\t\t\t\t\tv = self.data[ self.xyz_to_offset( x,y,z) ]\n\t\t\t\t\tif v != 0:\n\t\t\t\t\t\tsrf.fill( BLOCKS.get(v, [0,0])[1], \t(x, 127 -y, 1, 1 ))\n\t\t\t#save layer to dict for this chunk\n\t\t\tself.layers[z] = srf", "def draw(self):\n\t\tself.screen.fill(pygame.Color('black'))\n\t\tfor column in self.model.blocks:\n\t\t\tfor block in column:\n\t\t\t\tr = pygame.Rect(block.left,\n\t\t\t\t\t\t\t\tblock.top,\n\t\t\t\t\t\t\t\tblock.size,\n\t\t\t\t\t\t\t\tblock.size)\n\t\t\t\tpygame.draw.rect(self.screen, block.color,r)\n\t\tpygame.display.update()", "def create_circular_wall(center: np.ndarray, radius: float, n_walls: int = 100):\n\n d_angle = 2*np.pi/n_walls\n list_of_segments = []\n for i in range(n_walls):\n init_point = np.array([radius*np.cos(d_angle*i), radius*np.sin(d_angle*i)]) + center\n end_point = np.array([radius*np.cos(d_angle*(i+1)), radius*np.sin(d_angle*(i+1))]) + center\n wall = np.stack([init_point, end_point])\n list_of_segments.append(wall)\n return list_of_segments" ]
[ "0.6985171", "0.6947199", "0.689782", "0.6734781", "0.6710795", "0.67082715", "0.66530377", "0.6573703", "0.6563552", "0.64633447", "0.64461184", "0.6377385", "0.63731766", "0.63545173", "0.6318463", "0.63116866", "0.6260402", "0.62379223", "0.6211161", "0.61905986", "0.61559576", "0.6143857", "0.6141515", "0.61299866", "0.61172783", "0.61165136", "0.60922706", "0.6076112", "0.60575217", "0.6042645", "0.6038022", "0.6035101", "0.6003108", "0.59587765", "0.59485406", "0.59167045", "0.59094733", "0.590438", "0.59016", "0.5877749", "0.58720297", "0.5868326", "0.5864827", "0.58506787", "0.58506787", "0.58417207", "0.5830449", "0.5829046", "0.58148515", "0.58069223", "0.5797699", "0.57878155", "0.5786955", "0.5779143", "0.5778223", "0.57635975", "0.5756299", "0.5754037", "0.57496136", "0.57454836", "0.5733999", "0.57299185", "0.5723986", "0.57212514", "0.5707752", "0.56949395", "0.5691998", "0.5691041", "0.56885904", "0.5687352", "0.56855184", "0.5684734", "0.56821597", "0.56820774", "0.5680211", "0.56754667", "0.5661198", "0.5644068", "0.56440526", "0.5642159", "0.5633881", "0.5631973", "0.5628526", "0.56274426", "0.5626486", "0.5626321", "0.56226397", "0.5621204", "0.56161326", "0.5612129", "0.5607054", "0.5606724", "0.55987287", "0.55979866", "0.5585803", "0.55822617", "0.55785626", "0.5570912", "0.55699885", "0.55669004" ]
0.7463419
0
Remap the coordinates from the grid to the positions required by arcade, since (0, 0) is the bottom-left position in arcade but the grid sees (0, 0) as the top left.
def get_entity_dimensions(self, entity: Entity):
        top_left = list(entity.top_left)
        top_left[1] = SCREEN_HEIGHT - top_left[1]
        top_right = list(entity.top_right)
        top_right[1] = SCREEN_HEIGHT - top_right[1]
        bottom_left = list(entity.bottom_left)
        bottom_left[1] = SCREEN_HEIGHT - bottom_left[1]
        bottom_right = list(entity.bottom_right)
        bottom_right[1] = SCREEN_HEIGHT - bottom_right[1]
        return top_left, top_right, bottom_right, bottom_left
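A minimal, self-contained sketch of the same y-axis flip described by the query above; the SCREEN_HEIGHT value and the plain (x, y) tuples are assumptions for illustration, not part of the original Entity-based code:

SCREEN_HEIGHT = 600  # assumed window height in pixels

def grid_to_arcade(x, y, screen_height=SCREEN_HEIGHT):
    # arcade's origin is the bottom-left corner while the grid's is the top-left,
    # so only the y coordinate needs to be mirrored about the screen height
    return x, screen_height - y

print(grid_to_arcade(10, 20))  # -> (10, 580): a point near the grid's top lands near the top of the window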
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def gridalign(self):\n self.position.x = int(round(self.position.x))\n self.position.y = int(round(self.position.y))\n self.position.z = int(round(self.position.z))\n\n if self.fan:\n self.fan = (int(round(self.fan[0])),int(round(self.fan[1])),int(round(self.fan[2])))\n\n bestDist = 2*9\n bestMatrix = makeMatrix(0,0,0)\n\n for compass in [0, 90, 180, 270]:\n for pitch in [0, 90, 180, 270]:\n for roll in [0, 90, 180, 270]:\n m = makeMatrix(compass,pitch,roll)\n dist = matrixDistanceSquared(self.matrix, m)\n if dist < bestDist:\n bestMatrix = m\n bestDist = dist\n\n self.matrix = bestMatrix\n self.positionOut()\n self.directionOut()", "def initialCoordinates():\r\n return (-250,-250)", "def position_grid(self):\n return self.currentLevel.toGridCoord(self.position)", "def update_grid_pos(self):\n self.grid_pos = self.get_tile_of_position(self.tank.body.position)", "def world_to_grid(mapdata, wp):\n WX = wp.x\n WY = wp.y\n resol = mapdata.info.resolution\n # -0.5 but coordinates to center\n gx = math.floor((WX - mapdata.info.origin.position.x) / resol - 0.5)\n gy = math.floor((WY - mapdata.info.origin.position.y) / resol - 0.5)\n return gx, gy", "def update_positions(self, grid):\r\n self.grid = grid", "def grid_points(self):\n for i in range(self.rows):\n for j in range(self.cols):\n min_lat,max_lat,min_lon,max_lon = self.coords_to_min_max_lat_lon((i,j))\n if i == 0:\n print_gps(max_lat,max_lon,\"grid\")\n if j == 0:\n print_gps(max_lat,min_lon,\"grid\")\n if j == 0:\n print_gps(min_lat,min_lon,\"grid\")\n print_gps(min_lat,max_lon,\"grid\")", "def get_relative_grid(self, xgbl, ygbl):\r\n self.rx = xgbl - self.x0\r\n self.ry = ygbl - self.y0\r\n\r\n # flatten the coordinates:\r\n self.rx = self.rx.ravel()\r\n self.ry = self.ry.ravel()", "def adjust_grid(route, grid):\n\n for location in route:\n # If the position in the route is the end destination, dont make it a 1. 
\n if location == route[-1]:\n grid[location[0]][location[1]][location[2]] = 0\n\n # Else if the location on the grid is not a 1, make it one.\n elif grid[location[0]][location[1]][location[2]] == 0 or grid[location[0]][location[1]][location[2]] == 'x' or grid[location[0]][location[1]][location[2]] == 'y':\n grid[location[0]][location[1]][location[2]] = 1\n else:\n continue\n\n return grid", "def _cal_grid_coordinates(self, nc_handle):\n print(\"calculating grid coordinates\")\n #\n x = np.zeros(self._grid[\"counts\"][0], dtype=float)\n y = np.zeros(self._grid[\"counts\"][1], dtype=float)\n z = np.zeros(self._grid[\"counts\"][2], dtype=float)\n \n for i in range(self._grid[\"counts\"][0]):\n x[i] = self._grid[\"origin\"][0] + i*self._grid[\"d0\"][0]\n\n for j in range(self._grid[\"counts\"][1]):\n y[j] = self._grid[\"origin\"][1] + j*self._grid[\"d1\"][1]\n\n for k in range(self._grid[\"counts\"][2]):\n z[k] = self._grid[\"origin\"][2] + k*self._grid[\"d2\"][2]\n\n self._set_grid_key_value(\"x\", x)\n self._set_grid_key_value(\"y\", y)\n self._set_grid_key_value(\"z\", z)\n\n for key in [\"x\", \"y\", \"z\"]:\n self._write_to_nc(nc_handle, key, self._grid[key])\n return None", "def define_grid():\n grid_left = np.array([[-13.1000000000000, -35.5000000000000, -48.3000000000000, -60, -16.9000000000000,\n -34.8000000000000, -67.5000000000000, -46.1000000000000, -59.8000000000000,\n -14.2000000000000, -28.3000000000000, -42.3000000000000, -67.6000000000000,\n -50.5000000000000, -14.6000000000000, -60.9000000000000, -31.6000000000000,\n -5.10000000000000, -65.6000000000000, -41.8000000000000, -55.1000000000000,\n -22.7000000000000, -5.80000000000000, -49.2000000000000, -34.5000000000000,\n -61.5500000000000, -63.6000000000000, -40.4000000000000, -48.7000000000000,\n -21.8000000000000, -58.2000000000000, -7, -36.3000000000000, -48.1000000000000,\n -56.8000000000000, -7.30000000000000, -22.2000000000000, -36.8000000000000,\n -46.8000000000000],\n [-67.7000000000000, -60, -55.1000000000000, -51.8000000000000, -51.6000000000000,\n -49.3000000000000, -47.1000000000000, -43.7000000000000, -39.6000000000000,\n -39.1000000000000, -31.2000000000000, -30.7000000000000, -30.1000000000000,\n -24.4000000000000, -22.7000000000000, -18.7000000000000, -16.9000000000000,\n -12.6000000000000, -10.8000000000000, -10.2000000000000, -4.01000000000000, 1.20000000000000,\n 2.80000000000000, 3.70000000000000, 3.90000000000000, 6.20000000000000, 8.30000000000000,\n 11.8000000000000, 14.5000000000000, 16, 18.2000000000000, 18.4000000000000, 19.9000000000000,\n 24.6000000000000, 28.5200000000000, 33.8000000000000, 35, 35.4000000000000,\n 35.6000000000000],\n [69.1000000000000, 66, 58.2000000000000, 48, 78, 71.7000000000000, 31, 61.1000000000000,\n 53.3000000000000, 81.1000000000000, 76, 70.2000000000000, 41.2000000000000, 64.4000000000000,\n 80.2000000000000, 50.9000000000000, 75.2000000000000, 77.3000000000000, 37.8000000000000, 67,\n 53.2000000000000, 72, 74.8000000000000, 54.7000000000000, 66.5000000000000, 35.9000000000000,\n 25.7000000000000, 60.7000000000000, 50.5000000000000, 68.9000000000000, 27.3000000000000,\n 70.3000000000000, 59.6000000000000, 44, 20.8000000000000, 61.7000000000000, 57.2000000000000,\n 47, 36]])\n stn_left = np.array([[-14.6, -13.2, -11.7, -9.10, -11.7, -13.2, -7.90, -10],\n [-15.1, -15.1, -15.1, -12.6, -12.6, -12.6, -9.40, -10.1],\n [-5.40, -7.20, -8.70, -8.70, -7.50, -5.10, -10.3, -7.80]])\n grid_right = np.copy(grid_left)\n grid_right[0, :] = grid_right[0, :] * -1\n stn_right = 
np.copy(stn_left)\n stn_right[0, :] = stn_right[0, :] * -1\n\n return grid_left, grid_right, stn_left, stn_right", "def get_grid_position(self):\n tile_size_x = constants.WINDOW_WIDTH / constants.GRID_TILE_LENGTH\n tile_size_y = constants.WINDOW_HEIGHT / constants.GRID_TILE_LENGTH\n grid_x = tile_size_x / self.host.x\n grid_y = tile_size_y / self.host.y\n return grid_x, grid_y", "def grid_coords(self):\n return [(x, y) for y in range(self.height) for x in range(self.width)]", "def _coord(self, x, y):\n gridEdge = 7 # originally 5\n y = gridEdge - y\n cx = 100 * (x - 1) + 50\n cy = 100 * (y - 1) + 50\n r = 20\n return (cx - r, cy - r, cx + r, cy + r)", "def map_grid_loc_to_pixel((grid, x, y), panel_dimensions = bm_panel_dimensions, xc = 17.25, yc = 630, run = 17.25):\n x_offset = 0\n for panel_index, panel_dim in panel_dimensions.iteritems():\n if panel_index < grid:\n width, height = panel_dim\n x_offset += width*xc\n xp, yp = xc + x*run + x_offset, yc - y*run\n return (xp, yp)", "def convert_grid_to_z_up(scene): # pragma nocover\n\n '''\n There is an interaction between up and forward, the direction that the\n camera is pointing. By default, the camera points in the -z direction\n vector(0,0,-1). In this case, you can make the x or y axes (or anything\n between) be the up vector, but you cannot make the z axis be the up\n vector, because this is the axis about which the camera rotates when\n you set the up attribute. If you want the z axis to point up, first set\n forward to something other than the -z axis, for example vector(1,0,0).\n https://www.glowscript.org/docs/VPythonDocs/canvas.html\n '''\n # First set the x-axis forward\n scene.forward = x_axis_vector\n scene.up = z_axis_vector\n\n # Place the camera in the + axes\n scene.camera.pos = vector(10, 10, 10)\n scene.camera.axis = -scene.camera.pos\n return", "def grid_to_world(mapdata, x, y):\n initMAPX = mapdata.info.origin.position.x\n initMAPY = mapdata.info.origin.position.y\n resol = mapdata.info.resolution\n WX = ((x + 0.5) * resol + initMAPX)\n WY = ((y + 0.5) * resol + initMAPY)\n\n pt = Point()\n pt.x = WX\n pt.y = WY\n pt.z = 0.0\n return pt", "def Reproject(x, y, in_grid = 4326, out_grid = 32737):\n \n inProj = Proj(init='epsg:'+str(in_grid))\n outProj = Proj(init='epsg:'+str(out_grid))\n \n \n x2,y2 = transform(inProj,outProj,x,y)\n \n return x2, y2", "def _move_receptor_to_grid_center(self):\n lower_receptor_corner = np.array([self._crd[:,i].min() for i in range(3)], dtype=float)\n upper_receptor_corner = np.array([self._crd[:,i].max() for i in range(3)], dtype=float)\n \n receptor_box_center = (upper_receptor_corner + lower_receptor_corner) / 2.\n grid_center = (self._origin_crd + self._uper_most_corner_crd) / 2.\n displacement = grid_center - receptor_box_center\n\n print(\"Receptor is translated by \", displacement)\n\n for atom_ind in range(len(self._crd)):\n self._crd[atom_ind] += displacement\n return None", "def get_board_coordinates(self):\n\n temp_board = copy.deepcopy(self.get_board())\n board_columns = self.get_board_columns()\n board_rows = self.get_board_rows()\n\n for board_row in board_rows:\n\n for board_column in board_columns:\n \n index_column, index_row = self.transpose_position(board_column + board_row)\n temp_board[index_row][index_column] = (str(index_row)+str(index_column), board_column + board_row)\n\n return '\\n'.join(map(str, temp_board))", "def latlon_2_grid(x, y, z, origin):\n new_y = (y - origin[1]) * 111111\n new_x = (x - origin[0]) * (111111 * np.cos(origin[1] * (np.pi/180)))\n return 
new_x, new_y, z", "def position_to_grid(i, j):\n i -= i % SPACING - X % SPACING\n j -= j % SPACING - Y % SPACING\n return [i, j]", "def push_up (grid):\r\n for a in range (4): \r\n for i in range(3,0,-1): \r\n for j in range(4): \r\n if grid[i-1][j]==0: \r\n grid[i-1][j]=grid[i][j] \r\n grid[i][j]=0\r\n #joining like numbers \r\n for i in range(3): \r\n for j in range(4): \r\n if grid[i][j]==grid[i+1][j]: \r\n grid[i][j]=(grid[i][j])*2\r\n grid[i+1][j]=0\r\n #pafter adding the numbers continue to move them \r\n for a in range (4): \r\n for i in range(3,0,-1): \r\n for j in range(4): \r\n if grid[i-1][j]==0: \r\n grid[i-1][j]=grid[i][j] \r\n grid[i][j]=0", "def push_down (grid):\r\n for i in range (3):\r\n for row in range(2,-1, -1):\r\n for col in range(4):\r\n if grid[row+1][col] == 0:\r\n grid[row+1][col] = grid[row][col]\r\n grid[row][col] = 0 \r\n for row in range(1,4):\r\n for col in range(4):\r\n if grid[row-1][col] == grid[row][col]:\r\n grid[row-1][col] = grid[row-1][col]*2\r\n grid[row][col]=0 \r\n for i in range (3):\r\n for row in range(2,-1, -1):\r\n for col in range(4):\r\n if grid[row+1][col] == 0:\r\n grid[row+1][col] = grid[row][col]\r\n grid[row][col] = 0 \r\n return grid", "def calc_grid(self):\n return int(self._posn.x / cell_size), int(self._posn.y / cell_size)", "def get_pix_pos(self):\r\n return vec((self.grid_pos[0]*self.app.cell_width)+TOP_BOTTOM_BUFFER//2+self.app.cell_width//2,\r\n (self.grid_pos[1]*self.app.cell_height) +\r\n TOP_BOTTOM_BUFFER//2+self.app.cell_height//2)\r\n # where Pac-Man starts relative to the board\r", "def setPosition(self):\n # determine posX, posY for battle\n (x1,y1) = globals.battlemapQuadrants[self.systemGrid]\n self.posX = x1+self.setX\n self.posY = y1+self.setY", "def init_grid(self):\n self.pts = np.array(\n np.meshgrid(\n np.arange(self.net_dim[0]) + 1,\n np.arange(self.net_dim[1]) + 1\n )\n ).reshape(2, np.prod(self.net_dim)).T\n if self.topo == \"hexagonal\":\n self.pts[:, 0] = self.pts[:, 0] + .5 * (self.pts[:, 1] % 2)\n self.pts[:, 1] = np.sqrt(3) / 2 * self.pts[:, 1]", "def make_move(grid, n_columns, n_rows):\r\n # Generate the game grid to be manipulated\r\n new_grid = [[0] * (n_columns + 1) for i in range(n_rows + 1)]\r\n\r\n\r\n for i in range(n_rows):\r\n for j in range(n_columns):\r\n upper_left = grid[i-1][j-1] # neighbor to upper left of cell of interest\r\n upper = grid[i-1][j] # neighbor above cell of interest\r\n upper_right = grid[i-1][j+1] # neighbor to upper right of cell of interest\r\n left = grid[i][j-1] # neighbor to left of cell of interest\r\n right = grid[i][j+1] # neighbor to right of cell of interest\r\n bot_left = grid[i+1][j-1] # neighbor to bottom left cell of interest\r\n bot = grid[i+1][j] # neighbor below cell of interest\r\n bot_right = grid[i+1][j+1] # neighbor to bottom right of cell of interest\r\n\r\n # sum of the state of all neighbors\r\n on_neighbors = upper_left + upper + upper_right + left + right + bot_left + bot + bot_right\r\n\r\n # Any ON cell with fewer than two ON neighbors turns OFF\r\n if grid[i][j] == 1 and on_neighbors < 2:\r\n new_grid[i][j] = 0\r\n\r\n # Any ON cell with two or three ON neighbours stays ON\r\n elif grid[i][j] == 1 and (on_neighbors == 2 or on_neighbors == 3):\r\n new_grid[i][j] = 1\r\n\r\n # Any ON cell with more than three ON neighbors turns OFF\r\n elif grid[i][j] == 1 and on_neighbors > 3:\r\n new_grid[i][j] = 0\r\n\r\n # Any OFF cell with three ON neighbors turns ON\r\n elif grid[i][j] == 0 and on_neighbors == 3:\r\n new_grid[i][j] = 1\r\n\r\n return 
new_grid #manipulated game grid\r", "def compute_coordinates(self):\n self._x, self._y = self.board.index_to_coordinates(self.index)", "def get_grid_coordinate(self):\n return (int(self.position.x // (2 * Molecule.radius)),\n int(self.position.y // (2 * Molecule.radius)))", "def push_left (grid):\r\n for a in range(4): \r\n for i in range(4): \r\n for j in range(3,0,-1): \r\n if grid[i][j-1]==0: \r\n grid[i][j-1]=grid[i][j] \r\n grid[i][j]=0\r\n \r\n for i in range(4): \r\n for j in range(3): \r\n if grid[i][j]==grid[i][j+1]: \r\n grid[i][j]=(grid[i][j])*2\r\n grid[i][j+1]=0 \r\n \r\n for a in range(4): \r\n for i in range(4): \r\n for j in range(3,0,-1): \r\n if grid[i][j-1]==0: \r\n grid[i][j-1]=grid[i][j] \r\n grid[i][j]=0", "def map_coordinates(self,geometry):\n\t\tg = self.geomatrix\n\t\tdef project_coord(x,y,z=None):\n\t\t\tx = g[0] + g[1] * x + g[2] * y\n\t\t\ty = g[3] + g[4] * x + g[5] * y\n\t\t\tif z is None:\n\t\t\t\treturn x,y\n\t\t\telse:\n\t\t\t\treturn x,y,z\n\t\treturn transform(project_coord, geometry)", "def compute_positions(self):\n return (self.x + DIRECTIONS[self.facing_direction][0]) % (self.image.shape[0] - 1), \\\n (self.y + DIRECTIONS[self.facing_direction][1]) % (self.image.shape[1] - 1)", "def setup_positions(self):\n x, y = np.meshgrid(np.arange(self.img.shape[1]), np.arange(self.img.shape[0]))\n x = x[self.img > 0]\n y = y[self.img > 0]\n self.X = np.array([x, y]).T\n N = x.size\n pos2idx = {(x[i], y[i]):i for i in range(x.size)}\n neighbors = [[i] for i in range(N)]\n for i in range(N):\n xi = x[i]\n yi = y[i]\n for (dx, dy) in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n neighb = (xi+dx, yi+dy)\n if neighb in pos2idx:\n neighbors[i].append(pos2idx[neighb])\n self.pos2idx = pos2idx\n self.neighbors = neighbors", "def push_up (grid):\r\n for i in range (3):\r\n for row in range(1,4):\r\n for col in range(4):\r\n if grid[row-1][col] == 0:\r\n grid[row-1][col] = grid[row][col]\r\n grid[row][col] = 0\r\n for row in range(1,4):\r\n for col in range(4):\r\n if grid[row-1][col] == grid[row][col]:\r\n grid[row-1][col] = grid[row-1][col]*2\r\n grid[row][col]=0\r\n for row in range(1,4):\r\n for col in range(4):\r\n if grid[row-1][col] == 0:\r\n grid[row-1][col] = grid[row][col]\r\n grid[row][col] = 0\r\n \r\n return grid", "def get_gridState(self,real_state=None):\r\n if real_state is None:\r\n # convert unit back to mm\r\n r_state =[self.real_state[0],self.real_state[1],self.real_state[2]]\r\n else:\r\n r_state = real_state\r\n\r\n grid_state = [0,0,0]\r\n # assume orignal point is the center of the starting piece\r\n for i in range(2):\r\n if np.abs(real_state[i]) < (self.grid_size)/2.0:\r\n grid_state[i] =0\r\n else:\r\n # tol = round(self.grid_size/10.0, 2) #tolerance of floating point number\r\n tol =15.0 #1.5cm as tolerance\r\n grid_state[i] = int((real_state[i]-(self.grid_size)/2.0)//self.grid_size)\r\n remain = 1 if (real_state[i]-(self.grid_size)/2.0)%self.grid_size >=tol else 0\r\n grid_state[i] += remain\r\n\r\n # convert rad to degree\r\n real_angle = 180.0*(real_state[2]/math.pi)\r\n a = [abs(real_angle - i) for i in self.angle_set]\r\n i = np.argmin(a)\r\n # calculate approximate degree in grid world\r\n grid_state[2] = self.angle_set[i]\r\n # print(\"grid angle:\",grid_state[2],\"real ag:\",real_angle)\r\n\r\n # correct the degree in rad for moving\r\n # agl_rad = grid_state[2]*(math.pi/180.0)\r\n # correct_rot = -(real_state[2] - agl_rad)\r\n # sp = 100\r\n # t = (correct_rot* (math.pi/180))/sp\r\n # cur_t = time.time()\r\n # past_t = cur_t\r\n # while 
abs(past_t-cur_t) <=t+0.5:\r\n # self.Roomba.Move(0,sp)\r\n # cur_t = time.time()\r\n # self.Roomba.Move(0,0)\r\n\r\n return grid_state", "def createGrid(nx, ny, include_center = False):\n direction = 0\n positions = []\n if (nx > 1) or (ny > 1):\n half_x = int(nx/2)\n half_y = int(ny/2)\n for i in range(-half_y, half_y+1):\n for j in range(-half_x, half_x+1):\n if ((i==0) and (j==0)) and not include_center:\n continue\n else:\n if ((direction%2)==0):\n positions.append([j,i])\n else:\n positions.append([-j,i])\n direction += 1\n return positions", "def get_grid_position(self, as_int=True):\n if as_int:\n return (\n int(self.x // Constant.TILE_SIZE),\n int(self.y // Constant.TILE_SIZE),\n )\n else:\n return (\n self.x / Constant.TILE_SIZE,\n self.y / Constant.TILE_SIZE,\n )", "def coordinates(self):", "def convertGridNodes2ncsp(x0, y0, azi, xPos, yPos):\n\n # calculating change in alongshore coordinate for northing and easting\n # given the associated dx dy\n E_j = yPos * np.cos(np.deg2rad(azi + 90))\n N_j = yPos * np.sin(np.deg2rad(azi + 90))\n # calculating change in cross-shore coordinate for northing and easting\n E_i = xPos * np.cos(np.deg2rad(azi))\n N_i = xPos * np.sin(np.deg2rad(azi))\n # add em all up.\n easting = x0 + E_j + E_i\n northing = y0 + N_j + N_i\n\n return easting, northing", "def get_grid_locations(self, top_left, other_pos):\n cell_x = torch.floor(((other_pos[:, 0] - top_left[:, 0]) / self.neighborhood_size) *self.grid_size)\n\n # Added this part to implementation, otherwise the pooling is going to run into an indexing error\n cell_x[cell_x == self.grid_size] -= 1\n cell_y = torch.floor(((top_left[:, 1] - other_pos[:, 1]) / self.neighborhood_size) *self.grid_size)\n cell_y[cell_y == self.grid_size] -= 1\n grid_pos = cell_x + cell_y * self.grid_size\n\n return grid_pos", "def adjust_grid(grid,direction):\n height = len(grid)\n width = len(grid[0])\n if direction == RIGHT or direction == LEFT:\n new_grid = [[0 for dummy_i in range(width)] for dummy_j in range(height)]\n for row in range(height):\n for col in range(width):\n if direction == LEFT:\n new_grid[row][col] = grid[row][col]\n else:\n new_grid[row][col] = grid[row][width - 1 - col]\n else:\n new_grid = [[0 for dummy_i in range(height)] for dummy_j in range(width)]\n for row in range(width):\n for col in range(height):\n if direction == UP:\n new_grid[row][col] = grid[col][row]\n else:\n new_grid[row][col] = grid[col][width - 1 - row]\n return new_grid", "def generate_all_locations(grid, shape):", "def initialize_grid(self):\r\n for i in range(self.height):\r\n for j in range(self.width):\r\n self.grid[i][j] = 0\r\n \r\n # fill up unvisited cells\r\n for r in range(self.height):\r\n for c in range(self.width):\r\n if r % 2 == 0 and c % 2 == 0:\r\n self.unvisited.append((r,c))\r\n\r\n self.visited = []\r\n self.path = dict()\r\n self.generated = False", "def _align_toplevel_grid(self):\n\n # align origin with nearest multple of 128\n self.mins[0] -= self.mins[0] % 128\n self.mins[1] -= self.mins[1] % 128\n\n width = self.maxs[0] - self.mins[0]\n height = self.maxs[1] - self.mins[1]\n greatest_dim = max(width, height)\n nearest_pow_two = int(2 ** np.ceil(np.log2(greatest_dim)))\n width_adjustment = (nearest_pow_two - width)\n height_adjustment = (nearest_pow_two - height)\n\n self.maxs[0] += width_adjustment\n self.maxs[1] += height_adjustment", "def setSystemGrid(self):\n fromSystem = self.myGalaxy.systems[self.fromSystem]\n toSystem = self.myGalaxy.systems[self.toSystem]\n self.systemGrid = 
funcs.getMapQuadrant(toSystem, self, fromSystem.x, fromSystem.y,\n toSystem.x, toSystem.y)", "def snap_to_grid(offset):\n xoff = snap_val_to_grid(offset[0])\n yoff = snap_val_to_grid(offset[1]) \n return vector(xoff, yoff)", "def collide_grid(self):\n topleft = self.absolute_collide_topleft\n bottomright = self.absolute_collide_bottomright\n tlx, tly = self.currentLevel.toGridCoord(topleft)\n brx, bry = self.currentLevel.toGridCoord(bottomright)\n collide_grid = []\n for x in range(tlx, brx+1):\n for y in range(tly, bry+1):\n collide_grid.append( (x,y) )\n if not collide_grid:\n collide_grid = [(tlx,tly)]\n return collide_grid", "def push_down (grid):\r\n for a in range(4): \r\n for i in range(2,-1,-1): \r\n for j in range(4): \r\n if grid[i+1][j]==0: \r\n grid[i+1][j]=grid[i][j] \r\n grid[i][j]=0\r\n \r\n for i in range(2,-1,-1): \r\n for j in range(4): \r\n if grid[i][j]==grid[i+1][j]: \r\n grid[i+1][j]=(grid[i+1][j])*2\r\n grid[i][j]=0\r\n \r\n for a in range(4): \r\n for i in range(2,-1,-1): \r\n for j in range(4): \r\n if grid[i+1][j]==0: \r\n grid[i+1][j]=grid[i][j] \r\n grid[i][j]=0", "def solve_interior_tile(self, target_row, target_col):\r\n whole_move = ''\r\n # replace with your code\r\n if self._grid[target_row][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA\"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[target_row][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (target_row - zero_row) * 'd'\r\n #path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd'\r\n zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n assert self.lower_row_invariant(target_row, target_col), \"Invarian is False\"\r\n \r\n #current_position = self.current_position(target_row, target_col)\r\n #current_row, current_col = self.current_position(target_row, target_col)\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, target_col)\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (target_row - self.current_position(target_row, target_col)[0]) * 'u'\r\n zero_row = target_row - len(path_up)\r\n if target_col < self.current_position(target_row, target_col)[1]: # Right move\r\n path_side = (self.current_position(target_row, target_col)[1] - target_col) * 'r'\r\n zero_col = target_col + len(path_side)\r\n else: # Left move\r\n path_side = (target_col - self.current_position(target_row, target_col)[1]) * 'l'\r\n zero_col = target_col - len(path_side)\r\n \r\n #path_for_zero = path_up + path_side\r\n # print '------------------------------------------'\r\n # print 'Move to ZERO =', path_for_zero\r\n \r\n self.update_puzzle(path_up + path_side)\r\n \r\n # print 'Grid after 
move:'\r\n # print self\r\n # current_position = self.current_position(target_row, target_col) \r\n # current_row, current_col = current_position\r\n # print 'Updated Target tile position=',current_position\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, target_col)\r\n # print '-----------------------------------------'\r\n \r\n\r\n ###### New moves after moving ZERO tile into target tile\r\n # counter = 0\r\n whole_move += path_up + path_side\r\n while self.current_position(target_row, target_col) != \\\r\n (target_row, target_col) or zero_col != target_col - 1:\r\n # tt_in_home = self.current_position(target_row, target_col) == \\\r\n # (target_row, target_col)\r\n\r\n cyclic_moves = ''\r\n # counter += 1\r\n #current_position = self.current_position(target_row, target_col) \r\n #current_col = self.current_position(target_row, target_col)[1]\r\n # print 'Zero coloumn', zero_col, '== Target coloumn', target_col\r\n # print zero_col == target_col \r\n \r\n #### Case 1 if ZERO located in ther right of\r\n #### target tile (after it)\r\n if zero_col > self.current_position(target_row, target_col)[1]:\r\n # print ' Look in the up puzzle, zero on the right side'\r\n # if self.current_position(target_row, target_col)[1] != target_col:\r\n # # print 'not under target place'\r\n # cyclic_moves = 'dllur'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n if self.current_position(target_row, target_col)[1] == target_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # elif self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # # print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n # cyclic_moves = 'ul'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #### Case 2 if ZERO located under the target tile, and both\r\n #### of them located under the target position of the target tile\r\n elif zero_col == self.current_position(target_row, target_col)[1] and zero_col == target_col:\r\n # print 'Both under the target place'\r\n # print 'TT in home=', tt_in_home\r\n if self.current_position(target_row, target_col) == \\\r\n (target_row, target_col):\r\n cyclic_moves = 'ld'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n else:\r\n cyclic_moves = 'lddru'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n elif zero_col < self.current_position(target_row, target_col)[1]:\r\n # print 'ZERO tile located in the left side'\r\n if self.current_position(target_row, target_col)[1] != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda 
x: x=='r', cyclic_moves))\r\n elif self.current_position(target_row, target_col)[1] == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n #zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loot counter =',counter\r\n whole_move += cyclic_moves\r\n # if counter > 12:\r\n # break\r\n # print 'Tile is solved with move '+ whole_move\r\n assert self.lower_row_invariant(target_row, target_col-1), \"Invarian is False\"\r\n return whole_move", "def cw_rotate(self):\n self.grid = [list(x) for x in zip(*self.grid[::-1])]\n self.find_edges()", "def find_moves_on_axis(grid):\n moves = {}\n for i, _ in enumerate(grid):\n row_moves = list(moves_for_row(grid, i))\n if row_moves:\n moves[i] = row_moves\n return moves", "def add_adj_nodes(self):\n\n for x, row in enumerate(self.grid):\n for y, cell in enumerate(row):\n if x-1 >= 0:\n cell.above = self.grid[x-1][y]\n if y+1 < len(self.grid[0]):\n cell.right = self.grid[x][y+1]\n if x+1 < len(self.grid):\n cell.below = self.grid[x+1][y]\n if y-1 >= 0:\n cell.left = self.grid[x][y-1]", "def makeLocationMap(self):\n\t\tlocationMap = [[(0,0) for i in range(self.numRects)] for j in range(self.numRects)]\n\t\tstartTop = 0.5*(self.height-self.numRects*self.angle*self.rectWidth)\n\t\tstartLeft = (self.width/2)-(self.numRects/2)*self.rectWidth\n\t\tfor row in enumerate(self.makeIndexMap()):\n\t\t\tfor col in row[1]:\n\t\t\t\tlocationMap[col[0]][col[1]] = \\\n\t\t\t\t\t(startLeft+(col[0]+col[1])*self.rectWidth/2,\n\t\t\t\t\tstartTop+(row[0]+1)*0.5*self.angle*self.rectWidth)\n\t\treturn locationMap", "def find_center(self):\n x = np.int(np.rint((len(self.grid[0][0]))/2))\n center = np.array([x, x, x])\n self.grid[center[0]][center[1]][center[2]] = 1\n return self.grid, center", "def gen_grids(self):\n self.dx = self.grid_width / self.grid_resol\n self.dk = 2 * np.pi/self.grid_width\n self.grid_x_shifted = -self.grid_width/2 + self.dx * np.arange(0, self.grid_resol)\n self.grid_x = self.grid_x_shifted + self.grid_center\n self.grid_k = - (np.pi * self.grid_resol)/self.grid_width + self.dk * np.arange(0, self.grid_resol)\n self.grid_k = np.roll(self.grid_k, int((self.grid_resol)/2))\n self.grid_kin = np.square(self.h)/ (2*self.m) * np.square(self.grid_k)", "def calculate_positions(self):\n return {cell: (cell.column, -cell.row) for cell in self.game.get_cells()}", "def create_grid(data, drone_altitude, safety_distance):\n\n # minimum and maximum north coordinates\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\n #print(north_min, north_max)\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\n #print(east_min, east_max)\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil((north_max - north_min)))\n east_size = int(np.ceil((east_max - east_min)))\n #print(north_size, east_size)\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n # Center offset for grid\n north_min_center = np.min(data[:, 0])\n east_min_center = np.min(data[:, 1])\n \n # Populate the grid with obstacles\n for i in range(data.shape[0]):\n north, 
east, alt, d_north, d_east, d_alt = data[i, :]\n\n if alt + d_alt + safety_distance > drone_altitude:\n obstacle = [\n int(north - d_north - safety_distance - north_min_center),\n int(north + d_north + safety_distance - north_min_center),\n int(east - d_east - safety_distance - east_min_center),\n int(east + d_east + safety_distance - east_min_center),\n ]\n grid[obstacle[0]:obstacle[1], obstacle[2]:obstacle[3]] = 1\n\n return grid", "def draw_grid(self, min_x, max_x, min_y, max_y, min_z, max_z) -> None:\n from pymol import cmd\n from math import sin, cos\n \n # Prepare dimensions\n angle1 = 0.0\n angle2 = 0.0\n min_x = x - min_x\n max_x = max_x - x \n min_y = y - min_y \n max_y = max_y - y \n min_z = z - min_z \n max_z = max_z - z \n\n # Get positions of grid vertices\n # P1\n x1 = -min_x * cos(angle2) - (-min_y) * sin(angle1) * sin(angle2) + (-min_z) * cos(angle1) * sin(angle2) + x\n\n y1 = -min_y * cos(angle1) + (-min_z) * sin(angle1) + y\n\n z1 = min_x * sin(angle2) + min_y * sin(angle1) * cos(angle2) - min_z * cos(angle1) * cos(angle2) + z\n \n # P2\n x2 = max_x * cos(angle2) - (-min_y) * sin(angle1) * sin(angle2) + (-min_z) * cos(angle1) * sin(angle2) + x\n\n y2 = (-min_y) * cos(angle1) + (-min_z) * sin(angle1) + y\n \n z2 = (-max_x) * sin(angle2) - (-min_y) * sin(angle1) * cos(angle2) + (-min_z) * cos(angle1) * cos(angle2) + z\n \n # P3\n x3 = (-min_x) * cos(angle2) - max_y * sin(angle1) * sin(angle2) + (-min_z) * cos(angle1) * sin(angle2) + x\n\n y3 = max_y * cos(angle1) + (-min_z) * sin(angle1) + y\n\n z3 = -(-min_x) * sin(angle2) - max_y * sin(angle1) * cos(angle2) + (-min_z) * cos(angle1) * cos(angle2) + z\n \n # P4\n x4 = (-min_x) * cos(angle2) - (-min_y) * sin(angle1) * sin(angle2) + max_z * cos(angle1) * sin(angle2) + x\n\n y4 = (-min_y) * cos(angle1) + max_z * sin(angle1) + y\n\n z4 = -(-min_x) * sin(angle2) - (-min_y) * sin(angle1) * cos(angle2) + max_z * cos(angle1) * cos(angle2) + z\n\n \n # P5\n x5 = max_x * cos(angle2) - max_y * sin(angle1) * sin(angle2) + (-min_z) * cos(angle1) * sin(angle2) + x\n\n y5 = max_y * cos(angle1) + (-min_z) * sin(angle1) + y\n\n z5 = (-max_x) * sin(angle2) - max_y * sin(angle1) * cos(angle2) + (-min_z) * cos(angle1) * cos(angle2) + z\n \n # P6\n x6 = max_x * cos(angle2) - (-min_y) * sin(angle1) * sin(angle2) + max_z * cos(angle1) * sin(angle2) + x\n\n y6 = (-min_y) * cos(angle1) + max_z * sin(angle1) + y\n\n z6 = (-max_x) * sin(angle2) - (-min_y) * sin(angle1) * cos(angle2) + max_z * cos(angle1) * cos(angle2) + z\n \n # P7\n x7 = (-min_x) * cos(angle2) - max_y * sin(angle1) * sin(angle2) + max_z * cos(angle1) * sin(angle2) + x\n\n y7 = max_y * cos(angle1) + max_z * sin(angle1) + y\n\n z7 = -(-min_x) * sin(angle2) - max_y * sin(angle1) * cos(angle2) + max_z * cos(angle1) * cos(angle2) + z\n\n # P8\n x8 = max_x * cos(angle2) - max_y * sin(angle1) * sin(angle2) + max_z * cos(angle1) * sin(angle2) + x\n\n y8 = max_y * cos(angle1) + max_z * sin(angle1) + y\n\n z8 = (-max_x) * sin(angle2) - max_y * sin(angle1) * cos(angle2) + max_z * cos(angle1) * cos(angle2) + z \n\n # Create box object\n if \"grid\" in cmd.get_names(\"objects\"):\n cmd.delete(\"grid\")\n\n # Create vertices\n cmd.pseudoatom(\"grid\", name=\"v2\", pos=[x2, y2, z2], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v3\", pos=[x3, y3, z3], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v4\", pos=[x4, y4, z4], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v5\", pos=[x5, y5, z5], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v6\", pos=[x6, y6, z6], color=\"white\")\n 
cmd.pseudoatom(\"grid\", name=\"v7\", pos=[x7, y7, z7], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v8\", pos=[x8, y8, z8], color=\"white\")\n\n # Connect vertices\n cmd.select(\"vertices\", \"(name v3,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v2,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v5,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v2,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v4,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v4,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v3,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v6,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v7,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"grid\", name=\"v1x\", pos=[x1, y1, z1], color='white')\n cmd.pseudoatom(\"grid\", name=\"v2x\", pos=[x2, y2, z2], color='white')\n cmd.select(\"vertices\", \"(name v1x,v2x)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"grid\", name=\"v1y\", pos=[x1, y1, z1], color='white')\n cmd.pseudoatom(\"grid\", name=\"v3y\", pos=[x3, y3, z3], color='white')\n cmd.select(\"vertices\", \"(name v1y,v3y)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"grid\", name=\"v4z\", pos=[x4, y4, z4], color='white')\n cmd.pseudoatom(\"grid\", name=\"v1z\", pos=[x1, y1, z1], color='white')\n cmd.select(\"vertices\", \"(name v1z,v4z)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.delete(\"vertices\")", "def update_grid_numbers(focal_point, numbers_list, num_squares, scene):\n\n # Initial conditions\n padding = 0.25 # Padding to not draw numbers on top of lines.\n camera_axes = scene.camera.axis\n # Locate center of the axes\n x_origin, y_origin, z_origin = focal_point[0], focal_point[1], focal_point[2]\n\n # CAMERA AXES | DISPLAYED GRID | XZ PLANE | XY PLANE | YZ PLANE\n # x,y,z | x,y,z | x,z | x,y | y,z\n # -------------+-----------------+----------+----------+----------\n # -,-,- | +,+,+ | +,+ | +,+ | +,+\n # -,-,+ | +,+,- | +,- | +,+ | +,-\n # -,+,- | +,-,+ | +,+ | +,- | -,+\n # -,+,+ | +,-,- | +,- | +,- | -,-\n # +,-,- | -,+,+ | -,+ | -,+ | +,+\n # +,-,+ | -,+,- | -,- | -,+ | +,-\n # +,+,- | -,-,+ | -,+ | -,- | -,+\n # +,+,+ | -,-,- | -,- | -,- | -,-\n # min = -num_squares or 0, around the default position\n # max = +num_squares or 0, around the default position\n # e.g. 
at the origin, for negative axes: -10 -> 0, positive axes: 0 -> 10\n min_x_coord = x_origin + int(-(num_squares / 2) + (sign(camera_axes.x) * -1) * (num_squares / 2))\n max_x_coord = x_origin + int((num_squares / 2) + (sign(camera_axes.x) * -1) * (num_squares / 2))\n\n min_y_coord = y_origin + int(-(num_squares / 2) + (sign(camera_axes.y) * -1) * (num_squares / 2))\n max_y_coord = y_origin + int((num_squares / 2) + (sign(camera_axes.y) * -1) * (num_squares / 2))\n\n min_z_coord = z_origin + int(-(num_squares / 2) + (sign(camera_axes.z) * -1) * (num_squares / 2))\n max_z_coord = z_origin + int((num_squares / 2) + (sign(camera_axes.z) * -1) * (num_squares / 2))\n\n # If input is empty, append new, otherwise update current\n append = len(numbers_list) == 0\n # Dimensions don't change between updates, so indexing shall remain the same\n index = 0\n\n # X plane\n for x_pos in range(min_x_coord, max_x_coord + 1):\n # Draw the corresponding unit number at each x coordinate\n txt = str(x_pos)\n pos = vector(x_pos + padding, y_origin + padding, z_origin)\n if append:\n numbers_list.append(draw_text(txt, pos, scene))\n else:\n numbers_list[index].text = txt\n numbers_list[index].pos = pos\n index += 1\n # Draw the axis label at either the positive or negative side away from center\n # If sign = -1, draw off max side, if sign = 0 or 1, draw off negative side\n txt = \"X\"\n if (sign(camera_axes.x) * -1) > 0:\n pos = vector(max_x_coord + 1, y_origin, z_origin)\n else:\n pos = vector(min_x_coord - 1, y_origin, z_origin)\n if append:\n numbers_list.append(draw_text(txt, pos, scene))\n else:\n numbers_list[index].text = txt\n numbers_list[index].pos = pos\n index += 1\n\n # Y plane\n for y_pos in range(min_y_coord, max_y_coord + 1):\n # Draw the corresponding unit number at each x coordinate\n txt = str(y_pos)\n pos = vector(x_origin, y_pos + padding, z_origin + padding)\n if append:\n numbers_list.append(draw_text(txt, pos, scene))\n else:\n numbers_list[index].text = txt\n numbers_list[index].pos = pos\n index += 1\n # Draw the axis label at either the positive or negative side away from center\n # If sign = -1, draw off max side, if sign = 0 or 1, draw off negative side\n txt = \"Y\"\n if (sign(camera_axes.y) * -1) > 0:\n pos = vector(x_origin, max_y_coord + 1, z_origin)\n else:\n pos = vector(x_origin, min_y_coord - 1, z_origin)\n if append:\n numbers_list.append(draw_text(txt, pos, scene))\n else:\n numbers_list[index].text = txt\n numbers_list[index].pos = pos\n index += 1\n\n # Z plane\n for z_pos in range(min_z_coord, max_z_coord + 1):\n # Draw the corresponding unit number at each x coordinate\n txt = str(z_pos)\n pos = vector(x_origin, y_origin - padding, z_pos + padding)\n if append:\n numbers_list.append(draw_text(txt, pos, scene))\n else:\n numbers_list[index].text = txt\n numbers_list[index].pos = pos\n index += 1\n # Draw the axis label at either the positive or negative side away from center\n # If sign = -1, draw off max side, if sign = 0 or 1, draw off negative side\n txt = \"Z\"\n if (sign(camera_axes.z) * -1) > 0:\n pos = vector(x_origin, y_origin, max_z_coord + 1)\n else:\n pos = vector(x_origin, y_origin, min_z_coord - 1)\n if append:\n numbers_list.append(draw_text(txt, pos, scene))\n else:\n numbers_list[index].text = txt\n numbers_list[index].pos = pos\n index += 1", "def create_initial_grid():\n\n\tgrid = {(x, y) : ' + ' for x in range(8) for y in range(8)}\n\n\t# Define initial positions \n\tgrid[(3,3)] = colors.RED + \"[I]\" + colors.STOP\n\tgrid[(4,3)] = colors.GREEN + 
\"[A]\" + colors.STOP\n\tgrid[(3,4)] = colors.GREEN + \"[A]\" + colors.STOP\n\tgrid[(4,4)] = colors.RED + \"[I]\" + colors.STOP\n\n\treturn grid", "def position(self, grid_instance):\n xy_position = [elt.xy_position for elt in grid_instance.cells if \\\n elt.cell_type == 3]\n self.xy_position = xy_position[0]", "def grid(iant,xgrid=[0],ygrid=[0],sleep=4):\n d=Carma(iant).drive()\n d.setOffset(xgrid[0],ygrid[0])\n time.sleep(sleep)\n time.sleep(sleep)\n for y in ygrid:\n for x in xgrid:\n print x,y\n d.setOffset(x,y)\n time.sleep(sleep)", "def initialize_grid(self) -> None:\n for i in range(self.grid_size[0]):\n for j in range(self.grid_size[1]):\n self.set(i, j, self.base_color)", "def mouse_to_grid( pos ):\n mx,my=pos\n # account for window border and gap between cells\n ix = int((mx-H_CELLSIZE)/(CELLSIZE+CELLGAP))\n iy = int((my-H_CELLSIZE)/(CELLSIZE+CELLGAP))\n # force respect window borders\n if ix<0 or ix>=GRID_X or iy<0 or iy>=GRID_Y:\n return None\n else:\n return (ix,iy)", "def reset_position(self): \n self.rect.x = 400\n self.rect.y = 400\n \n # Specifies the Player's spawnpoint as maze_arrangement[8][8], representing\n # the tile in the center of the maze \n self.__minotaur_x = 8\n self.__minotaur_y = 8", "def regrid_longitude_coord(self, cube):\n # make a list with the 'longitude' coord in the form: 0/180/-180/0\n neg_lons = ((cube.coord(\"longitude\").points + 180) % 360) - 180\n # interpolates the cube data to the new 'longitude' dimensions\n cube = cube.interpolate([(\"longitude\", neg_lons)],\n iris.analysis.Linear())\n sorted_cube = self.sorted_dim(cube)\n return sorted_cube", "def init(self, windowsize:tuple):\r\n y_count, x_count = 3, 0 #< Set the starting counter for the look_up_table. y starts with three because the first three lines are just Nones\r\n # Creating the constant maze \r\n maze_size = windowsize[0], windowsize[1] - 2 * self.grid_size\r\n self.maze = pg.Surface(maze_size) \r\n \r\n \r\n \r\n # Draw the outermost rectangles on self.maze\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((0, 3 * self.grid_size), (28 * self.grid_size, 31 * self.grid_size)), 4)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((0 + self.grid_size // 2, 3 * self.grid_size + self.grid_size // 2),(27 * self.grid_size, 30 * self.grid_size)), 4) \r\n # Draw the inner rectangles\r\n for y in self.look_up_table[3 : -2]: #< y is a list of one row from the maze\r\n for x in y: #< x is a string that is decoded as already explained\r\n pos = [self.grid_size * x_count, self.grid_size * y_count]\r\n # Set reference position in the middle of one square\r\n pos[0] += self.grid_size // 2\r\n pos[1] += self.grid_size // 2\r\n x_count += 1\r\n # Check if x is rectangle\r\n if x != None and x[0] == 'r':\r\n # When the size of the string is equal or greater than 4 it's rectangle with a specific size and not just a border.\r\n if len(x) >= 4:\r\n # get the x and y size of the rectangle. 
x will be something like 'rx1_y1' x1 resprestens the size in x direction and y1 in y direction.\r\n xy_dim = x[1:].split(\"_\") \r\n xy_dim[0] = int(xy_dim[0])\r\n xy_dim[1] = int(xy_dim[1])\r\n rect = tuple(pos), (xy_dim[0] * self.grid_size , xy_dim[1] * self.grid_size )\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], rect, self.width)\r\n # If the last char is a w (white), u (up) or l (left) a line gets draw one a specific position \r\n if x[-1] == 'w':\r\n self.draw_line(self.maze, 'u', (x_count,y_count), True)\r\n if x[-1] == 'u' or x[-1] == 'l':\r\n if x_count == 0:\r\n self.draw_line(self.maze, x[-1], (len(y), y_count))\r\n else:\r\n self.draw_line(self.maze, x[-1], (x_count, y_count))\r\n \r\n y_count += 1\r\n x_count = 0\r\n # Just some cosmetic drawing\r\n pg.draw.rect(self.maze, Colors.colors['BLACK'], ((0, 12 * self.grid_size + self.grid_size // 2 + 4), (self.grid_size // 2 + 1, 10 * self.grid_size - 4)), 4)\r\n pg.draw.rect(self.maze, Colors.colors['BLACK'], ((28 * self.grid_size - self.grid_size // 2 - 1, 12 * self.grid_size + self.grid_size // 2 + 4), (self.grid_size // 2 + 1, 10 * self.grid_size - 4)), 4)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((-self.width, 13 * self.grid_size), (5 * self.grid_size, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((-self.width, 19 * self.grid_size), (5 * self.grid_size, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((23 * self.grid_size, 13 * self.grid_size), (5 * self.grid_size + 10, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((23 * self.grid_size, 19 * self.grid_size), (5 * self.grid_size + 10, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((11 * self.grid_size, 16 * self.grid_size), (6 * self.grid_size, 3 * self.grid_size)), self.width)\r\n \r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (0, 16 * self.grid_size + self.grid_size // 2 - 1), (self.grid_size // 2 + self.width, 16 * self.grid_size + self.grid_size // 2 - 1), self.width)\r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (0, 18 * self.grid_size + self.grid_size // 2), (self.grid_size // 2 + self.width, 18 * self.grid_size + self.grid_size // 2), self.width)\r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (self.grid_size * 28 - self.grid_size, 16 * self.grid_size + self.grid_size // 2 - 1), (self.grid_size * 28 + self.width, 16 * self.grid_size + self.grid_size // 2 - 1), self.width)\r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (self.grid_size * 28 - self.grid_size, 18 * self.grid_size + self.grid_size // 2), (self.grid_size * 28 + self.width, 18 * self.grid_size + self.grid_size // 2), self.width)\r\n self.is_init = True", "def push_right (grid): \r\n for a in range(4): \r\n for i in range(4): \r\n for j in range(3): \r\n if grid[i][j+1]==0: \r\n grid[i][j+1]=grid[i][j] \r\n grid[i][j]=0\r\n for i in range(4): \r\n for j in range(3,0,-1): \r\n if grid[i][j]==grid[i][j-1]: \r\n grid[i][j]=(grid[i][j])*2\r\n grid[i][j-1]=0\r\n for a in range(4): \r\n for i in range(4): \r\n for j in range(3): \r\n if grid[i][j+1]==0: \r\n grid[i][j+1]=grid[i][j] \r\n grid[i][j]=0", "def swipeBase (self) :\n grid = self.grid\n\n #we start by putting every tile up\n for columnNbr in range(4) :\n nbrZeros = 4 - np.count_nonzero(grid[:,columnNbr])\n\n for lineNbr in range(4) :\n counter = 0\n while (grid[lineNbr, columnNbr] == 0) and (counter < 4):\n counter += 1\n if np.count_nonzero(grid[lineNbr:4, columnNbr]) != 0 
:\n for remainingLine in range (lineNbr, 3) :\n grid[remainingLine, columnNbr] = grid[remainingLine+1, columnNbr]\n grid[3, columnNbr] = 0\n\n #now we do the additions\n for lineNbr in range(3) :\n if grid[lineNbr, columnNbr] == grid[lineNbr+1, columnNbr] :\n grid[lineNbr, columnNbr] *= 2\n for remainingLine in range (lineNbr+1, 3) :\n grid[remainingLine, columnNbr] = grid[remainingLine+1, columnNbr]\n grid[3, columnNbr] = 0\n\n return (grid)", "def mapToCoordinates(self, shot):\r\n toks = shot.split(\"-\")\r\n return Coordinates(ord(toks[0]) - ord(\"A\"), int(toks[1]) - 1)", "def _calc_coords(self):\n i = num.outer(num.arange(self.size[0]), num.ones(self.size[1]))\n i0 = self.pos[0] + (i * self.space[0])\n i1 = self.pos[0] + (i * self.space[0]) + self.bub[0]\n\n j = num.outer(num.ones(self.size[0]), num.arange(self.size[1]))\n j0 = self.pos[1] + (j * self.space[1])\n j1 = self.pos[1] + (j * self.space[1]) + self.bub[1]\n\n self.coords = num.dstack((i0, i1, j0, j1)).astype('i')", "def fill_grid(self):\n\n for row_margin, row in enumerate(range(self.rows)):\n self.grid.append([])\n\n for col_margin, col in enumerate(range(self.cols)):\n x = col*self.cell_size + col_margin\n y = row*self.cell_size + row_margin\n\n rect = pygame.Rect(x, y, self.cell_size, self.cell_size)\n\n cell = Cell(row, col, rect)\n\n if row == 7 and col == 3:\n cell.root = True\n self.root = cell\n elif row == 7 and col == 16:\n cell.goal = True\n self.goal = cell\n\n self.grid[row].append(cell)", "def draw_grid(self):\n if self.grid_center == True:\n (n, m) = (self.n, self.m)\n (dx, dy) = (self.dx // 2, self.dy // 2)\n else:\n (n, m) = (self.n + 1, self.m + 1)\n (dx, dy) = (0, 0)\n\n x0 = self.x0 + dx\n y0 = self.y0 + dy\n\n # vertical lines\n for j in range(m):\n p0 = (x0 + j * self.dx, y0)\n p1 = (x0 + j * self.dx, y0 + (n-1) * self.dy)\n pygame.draw.line(self.screen, self.grid_col, p0, p1, self.grid_d) \n # horizontal lines\n for i in range(n):\n p0 = (x0, y0 + i * self.dy)\n p1 = (x0 + (m-1) * self.dx, y0 + i * self.dy)\n pygame.draw.line(self.screen, self.grid_col, p0, p1, self.grid_d)", "def move(self, direction):\r\n # replace with your code\r\n initial_tile = self.__direct_top[direction]\r\n offset = OFFSETS[direction]\r\n direct_range = self.__direct_range[direction] \r\n backup_list = [[0 for dummy_col in range(self.grid_width)] for dummy_row in range(self.grid_height)]\r\n \r\n for initial_count, tile_cursor in enumerate(initial_tile):\r\n tem_list = []\r\n grid_cursor = tile_cursor\r\n for dummy_cursor in range(direct_range):\r\n \r\n tem_list.append(self.grid[grid_cursor[0]][grid_cursor[1]])\r\n grid_cursor = tuple(x + y for x,y in zip(grid_cursor,offset))\r\n \r\n new_list = merge(tem_list)\r\n if self.update_dict[direction] == 0:\r\n for col_cursor in range(direct_range):\r\n backup_list[col_cursor][initial_count] = new_list[col_cursor]\r\n elif self.update_dict[direction] == 1: \r\n for col_cursor in range(direct_range):\r\n backup_list[self.grid_height -1 - col_cursor][initial_count] = new_list[col_cursor]\r\n elif self.update_dict[direction] ==3:\r\n backup_list[initial_count] = new_list\r\n else:\r\n for col_cursor in range(direct_range):\r\n backup_list[initial_count][self.grid_width -1 - col_cursor] = new_list[col_cursor]\r\n \r\n flag = (self.grid == backup_list)\r\n self.grid = backup_list\r\n if not flag:\r\n self.new_tile()", "def test_board_coordinates_toXY():\r\n m = Move()\r\n for col_num, col_name in enumerate(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']):\r\n for row in range(1, 9):\r\n assert 
m.translate_to_xy(col_name + str(row)) == (Board.SIZE - row, col_num)", "def create_grid(data, drone_altitude, safety_distance):\n\n # minimum and maximum north coordinates\n north_min = np.floor(np.amin(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.amax(data[:, 0] + data[:, 3]))\n print(0, north_max - north_min)\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.amin(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.amax(data[:, 1] + data[:, 4]))\n print(0, east_max - east_min)\n\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil(north_max - north_min))\n east_size = int(np.ceil(east_max - east_min))\n\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n\n # Populate the grid with obstacles\n print(data.shape[0])\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n # Determine which cells contain obstacles\n nc = int(north - north_min)\n ec = int(east - east_min)\n dn = int(d_north)\n de = int(d_east)\n sd = int(safety_distance)\n x0 = int(ec - (de + sd))\n y0 = int(nc - (dn + sd))\n xm = int(ec + (de + sd))\n ym = int(nc + (dn + sd))\n nm = north_max - north_min\n em = east_max - east_min\n for e in range(x0, xm):\n for n in range(y0, ym):\n # skip out of range conditions\n if e < 0:\n continue\n if e >= em:\n continue\n if n < 0:\n continue\n if n >= nm:\n continue\n if (alt + d_alt + safety_distance) <= drone_altitude:\n continue\n # plot it\n grid[n][e] = 1\n\n return grid", "def generate_regular_grid_point_coords(R, side_size, device):\n aff = torch.tensor([[[0.5, 0, 0.5], [0, 0.5, 0.5]]], device=device)\n r = F.affine_grid(aff, torch.Size((1, 1, side_size, side_size)), align_corners=False)\n return r.view(1, -1, 2).expand(R, -1, -1)", "def makeCMSgridNodes(x0, y0, azi, dx, dy, z):\n # convert from node calculation to centric calculation\n # first move origin from vertex of grid to center of first grid cell\n\n # first convert to FRF coordinates\n FRF = gp.FRFcoord(x0, y0, coordType='ncsp')\n # shift origin to cell center instead of cell vertex\n x0N = FRF['xFRF'] - dx[0]/2\n y0N = FRF['yFRF'] - dy[0]/2\n # create new dx/dy array spaced with half of each of the 2 cells\n dxN = dx[:-1] + np.diff(dx)/2\n dyN = dy[:-1] + np.diff(dy)/2 # new nodes at the grid center - needed to fit into\n # create new nodes in FRF x and FRF Y using cell centric locations for accurate interpolation\n outXfrf, outYfrf = createGridNodesinFRF(x0N, y0N, dxN, dyN, dx.shape[0], dy.shape[0])\n xFRF, yFRF = np.meshgrid(outXfrf, sorted(outYfrf))\n # new work no need to loop as above\n convert2 = gp.FRFcoord(xFRF.flatten(), yFRF.flatten(), coordType='FRF')\n lat = convert2['Lat'].reshape(xFRF.shape)\n lon = convert2['Lon'].reshape(xFRF.shape)\n easting = convert2['StateplaneE'].reshape(xFRF.shape)\n northing = convert2['StateplaneN'].reshape(yFRF.shape)\n # making i's and j's for cell numbers\n ii = np.linspace(1, xFRF.shape[1], xFRF.shape[1])\n jj = np.linspace(1, yFRF.shape[0], yFRF.shape[0])\n\n BathyPacket = {'i': ii,\n 'j': jj,\n 'latitude': lat,\n 'longitude': lon,\n 'easting': easting,\n 'northing': northing,\n 'xFRF': sorted(xFRF[0, :]),\n 'yFRF': yFRF[:, 0],\n 'azimuth': azi,\n 'x0': x0,\n 'y0': y0,\n 'DX': dxN,\n 'DY': dyN,\n 'ni': len(ii),\n 'nj': len(jj),\n 'elevation': z, # exported as [t, x,y] dimensions\n 'gridFname': 'CMS GRid',\n 'time': 0}\n\n return BathyPacket", "def _build_raw_grid(self):\n self._raw_grid = parse_ascii_grid(self._ascii_grid)\n 
self.width = self._raw_grid.shape[0]\n self.height = self._raw_grid.shape[1]\n # If a start position has been specified, add it to grid.\n if self._agent_default_pos is not None:\n assert len(self._agent_default_pos) == 2\n x, y = self._agent_default_pos\n self._raw_grid[x, y] = 's'\n # If a goal position has been specified, add it to the grid.\n if self._goal_default_pos is not None:\n assert len(self._goal_default_pos) == 2\n x, y = self._goal_default_pos\n self._raw_grid[x, y] = 'g'", "def _cell_to_global(self, xy, wh):\n # grid setup\n line = tf.range(0, self.num_cells)\n rows = tf.reshape(line, [self.num_cells, 1])\n rows = tf.tile(rows, [1, self.num_cells])\n cols = tf.reshape(line, [1, self.num_cells])\n cols = tf.tile(cols, [self.num_cells, 1])\n grid = tf.stack([cols, rows], axis=-1)\n grid = tf.reshape(grid, [1, self.num_cells, self.num_cells, 1, 2])\n grid = tf.cast(grid, tf.float32)\n # box transformation\n xy += grid\n wh *= tf.reshape(self.anchors, [1, 1, 1, self.num_anchors, 2])\n return tf.concat([xy, wh], axis=-1) / self.num_cells", "def ij_coordinates(self):\n\n x = np.arange(self.nx)\n y = np.arange(self.ny)\n return np.meshgrid(x, y)", "def initialize_position(self):\n self.x = self.cell_xl + self.cell_dx * np.random.rand(1)[0]", "def position_tile(self, target_row, target_col, current_row, current_col):\r\n moves_str = \"\"\r\n # current target is on the upper of 0\r\n if current_col == target_col and current_row < target_row:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the left of 0\r\n elif current_row == target_row and current_col < target_col:\r\n lefts = target_col - current_col\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n for dummy_cycle in range(lefts - 1):\r\n moves_str += CIRCLES[\"UP_CIRCLE\"]\r\n # current target is on the upperleft of 0\r\n elif current_row < target_row and current_col < target_col:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n lefts = target_col - current_col\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n for dummy_cycle in range(lefts - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_CIRCLE\"]\r\n moves_str += \"dru\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the upperright of 0\r\n elif current_col > target_col and current_row < target_row:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n rights = current_col - target_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n for dummy_cycle in range(rights - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_LEFT_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_LEFT_CIRCLE\"] \r\n moves_str += \"dlu\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the right of 0\r\n elif current_col > target_col and current_row == target_row:\r\n rights = current_col - target_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n for dummy_cycle in range(rights - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_LEFT_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_LEFT_CIRCLE\"] \r\n moves_str 
+= \"ulld\"\r\n return moves_str", "def push_left (grid):\r\n \r\n for i in range(4):\r\n row = grid[i]\r\n \r\n if row == [0, 0 ,0 ,0]:\r\n continue\r\n for k in range(4):\r\n for j in range(1, 4):\r\n if row[j-1] == 0:\r\n row[j-1] = row[j]\r\n row[j] = 0\r\n for l in range(1, 4):\r\n if row[l-1] == row[l]:\r\n row[l-1] = row[l]*2\r\n row[l] = 0\r\n for j in range(1, 4):\r\n if row[j-1] == 0:\r\n row[j-1] = row[j]\r\n row[j] = 0 \r\n grid[i] = row\r\n return grid", "def recreate_grid(self):\n\n self.print_numlist = arcade.SpriteList()\n for row in range(ROW_COUNT):\n for column in range(COLUMN_COUNT):\n sprite = arcade.Sprite(\n f\"Numbers/{self.grid[row][column]}.png\", scale=0.2\n )\n x = (MARGIN + WIDTH) * column + MARGIN + WIDTH // 2\n y = (MARGIN + HEIGHT) * row + MARGIN + HEIGHT // 2\n sprite.center_x = x\n sprite.center_y = y\n self.print_numlist.append(sprite)\n # Check to see if all squares have been filled in\n if 0 not in self.grid:\n # if Cameron.Check_for_Completion(self.grid) == True:\n self.done = True", "def _get_grid_cell_indexes(proj, xs, ys, bounding_box):\n # Unpack values from the projection\n eq_rad = proj.semi_major_axis\n polar_rad = proj.semi_minor_axis\n h = proj.perspective_point_height + eq_rad\n lon0 = proj.longitude_of_projection_origin\n \n # Unpack values from the area we want to grab the data\n min_lat, min_lon = bounding_box.sw_corner()\n max_lat, max_lon = bounding_box.ne_corner()\n \n with np.errstate(invalid='ignore'):\n # Calculate the lat and lon grids\n xs, ys = np.meshgrid(xs, ys)\n a_vals = np.power(np.sin(xs), 2.0) + \\\n np.power(np.cos(xs), 2.0) * (np.power(np.cos(ys), 2.0) + \\\n eq_rad * eq_rad / polar_rad / polar_rad * np.power(np.sin(ys), 2.0))\n b_vals = -2 * h * np.cos(xs) * np.cos(ys)\n c_val = h * h - eq_rad * eq_rad\n \n rs = (-b_vals - np.sqrt(np.power(b_vals, 2.0) - 4 * a_vals * c_val)) / (2 * a_vals)\n \n sx = rs * np.cos(xs) * np.cos(ys)\n sy = -rs * np.sin(xs)\n sz = rs * np.cos(xs) * np.sin(ys)\n \n lats = np.arctan((eq_rad *eq_rad * sz) \\\n / (polar_rad * polar_rad * np.sqrt(np.power(h - sx, 2.0) + np.power(sy, 2.0))))\n lats = np.degrees(lats)\n \n lons = np.radians(lon0) - np.arctan(sy / (h - sx))\n lons = np.degrees(lons)\n \n # Flatten the arrays so we get a 1D list of indexes\n lats = lats.flatten()\n lons = lons.flatten()\n \n # Filter out values not in our bounding box\n lats = np.where(np.logical_and(lats >= min_lat, lats <= max_lat))[0]\n lons = np.where(np.logical_and(lons >= min_lon, lons <= max_lon))[0]\n idxs = list(set(lons).intersection(set(lats)))\n \n return idxs", "def localize(image):\n\n # Call the vision function in order to have the grid with the obstacle and the goal coordinate\n object_grid, occupancy_grid, world = vision(image)\n\n # Correction of the goal coordinate in order to fit the A* coordinate\n goal_x = object_grid[0][1]\n goal_y = WIDTH_G - object_grid[0][0]\n goal_coor = (goal_x, goal_y)\n\n return occupancy_grid, goal_coor", "def translate_to_grid(location):\n\n columns = 'abcdefghi'\n return [int(columns.index(location[0].lower())), int(location[1:])-1]", "def create_grid(grid):\r\n for i in range(4):\r\n grid.append([0,0,0,0])", "def init_position(self):\n\t\t\n\t\t# Starting position, 1 is for WHITE, -1 is for BLACK\n\t\tself['D4'] = self['E5'] = 1\n\t\tself['D5'] = self['E4'] = -1", "def make_maps_of_2x1_pix_coordinates (sp) : \n x_rhs = np.arange(sp.colsh)*sp.pixs + sp.pixw - sp.pixsh\n x_rhs[0] = sp.pixwh # set x-coordinate of the wide pixel \n x_arr = 
np.hstack([-x_rhs[::-1],x_rhs])\n\n y_arr = np.arange(sp.rows) * sp.pixs\n y_arr -= y_arr[-1]/2 # move origin to the center of array\n\n sp.x_map2x1, sp.y_map2x1 = np.meshgrid(x_arr, y_arr)", "def setup(self):\n self.board[(3, 3)] = -1\n self.board[(3, 4)] = -1\n self.board[(4, 3)] = 1\n self.board[(4, 4)] = 1\n\n self.stones_set = 4", "def push_left (grid):\n #moves the block if there is a 0 value\n for i in range(3):\n for j in range(1,4):\n for k in range(4):\n if grid[k][j-1]==0 or grid[k][j-1]==\" \":\n grid[k][j-1] = grid[k][j]\n grid[k][j]= 0\n #checks if adjacent blocks have the same values and adds them\n for i in range(1,4):\n for j in range(4):\n if grid[j][i-1]==grid[j][i]:\n grid[j][i-1]+=grid[j][i]\n grid[j][i]= 0 \n #moves the rest of the grid up\n for i in range(1,4):\n for j in range(4):\n if grid[j][i-1]== 0:\n grid[j][i-1] = grid[j][i]\n grid[j][i] = 0\n #if there is a value in the position\n return grid", "def print_grid(self):\n\t\tclear_screen()\n\n\t\tprint('# DUNGEON MAP #\\n')\n\n\t\tfor r in self.grid_matrix:\n\t\t\tfor c in r:\n\t\t\t\tprint(c, end='')\n\t\t\tprint()\t\t\t\t\t\t# use print('\\n' to space out grid further)\n\n\t\tprint('\\n{} is located at X'.format(self.player.info['Name']))\n\n\t\tpress_enter()", "def update_grid(self):\n # Check to see if we have moved squares\n _new_grid = self.calc_grid()\n if _new_grid == self._grid:\n return\n # Remove from old square and add to new square\n self.target._grid[self._grid][self._type].discard(self)\n self.target._grid[_new_grid][self._type].add(self)\n # Update coordinates\n self._grid = _new_grid", "def solve_row1_tile(self, target_col):\r\n # replace with your code\r\n whole_move = ''\r\n if self._grid[1][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA solve_row1_tile \"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[1][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (1 - zero_row) * 'd'\r\n # path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (1 - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (1 - zero_row) * 'd'\r\n #zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n #zero_row -= len(filter(lambda x: x=='u', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n\r\n assert self.row1_invariant(target_col), 'Some trouble in row1_invariant' \r\n \r\n #current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = 1, target_col\r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = (zero_row - self.current_position(1, target_col)[0]) * 'u' + (zero_col - self.current_position(1, target_col)[1]) * 'l'\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 
'Updated Target tile position=',self.current_position(1, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (1, target_col)\r\n counter = 0\r\n while self.current_position(1, target_col) != \\\r\n (1, target_col) or (zero_row, zero_col) != (0, target_col):\r\n # print 'Welcome to while loop!'\r\n cyclic_moves = ''\r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n #current_position = self.current_position(1, target_col)\r\n current_col = self.current_position(1, target_col)[1]\r\n counter +=1\r\n if self.current_position(1, target_col) == \\\r\n (1, target_col):\r\n # print 'ZERO not under TT'\r\n cyclic_moves = 'ur'\r\n whole_move += cyclic_moves\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif zero_col < current_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'ZERO tile located in the left side and down move is POSIBLE'\r\n if current_col != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif current_col != target_col and self._grid[zero_row+1][zero_col] > \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves)) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n # elif zero_col < current_col and self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n # # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n # if current_col != target_col:\r\n # # print 'not under the target place'\r\n # cyclic_moves = 'urrdl'\r\n # whole_move += cyclic_moves\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # elif current_col == target_col:\r\n # # print 'Target tile under target place'\r\n # cyclic_moves = 'urd'\r\n # whole_move += cyclic_moves\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #cyclic_moves +='ur'\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop 
counter =',counter\r\n if counter > 10:\r\n break\r\n return whole_move", "def push_left (grid):\r\n \r\n #moves values left\r\n for row in range(4):\r\n for column in range(3):\r\n if grid[row][column]==0:\r\n grid[row][column]=grid[row][column+1]\r\n grid[row][column+1]=0\r\n \r\n \r\n #moves values left\r\n for row in range(4):\r\n for column in range(3):\r\n if grid[row][column]==0:\r\n grid[row][column]=grid[row][column+1]\r\n grid[row][column+1]=0 \r\n \r\n \r\n #checks for similar values and combines\r\n for row in range(4):\r\n for column in range(3):\r\n if grid[row][column]==grid[row][column+1]:\r\n grid[row][column]=2*grid[row][column]\r\n grid[row][column+1]=0\r\n \r\n #moves remaining values left \r\n for row in range(4):\r\n for column in range(3):\r\n if grid[row][column]==0:\r\n grid[row][column]=grid[row][column+1]\r\n grid[row][column+1]=0", "def grid_to_index(mapdata, x, y):\n i = (y * mapdata.info.width) + x\n return int (i)", "def ion1_position(self,x,y,z):\n axes_vector = np.array([self.a,self.b,self.c])\n self.ion1 = x*self.a + y*self.b + z*self.c\n self.position['1A'] = np.dot(self.position_map[1],axes_vector) + self.ion1\n self.position['2A'] = np.dot(self.position_map[2],axes_vector) + self.ion1\n self.position['3A'] = np.dot(self.position_map[3],axes_vector) + self.ion1\n self.position['4A'] = np.dot(self.position_map[4],axes_vector) + self.ion1\n self.position['5A'] = np.dot(self.position_map[5],axes_vector) + self.ion1\n self.position['6A'] = np.dot(self.position_map[6],axes_vector) + self.ion1\n self.position['7A'] = np.dot(self.position_map[7],axes_vector) + self.ion1\n self.position['8A'] = np.dot(self.position_map[8],axes_vector) + self.ion1" ]
[ "0.64815533", "0.6462513", "0.6450318", "0.6435964", "0.6428564", "0.6419576", "0.63170576", "0.626487", "0.6256797", "0.61810344", "0.618049", "0.61792535", "0.6100417", "0.60906714", "0.60497826", "0.60250664", "0.60003793", "0.5986764", "0.59801954", "0.5972718", "0.5964631", "0.5961082", "0.5957359", "0.59498644", "0.5941504", "0.5916404", "0.59132403", "0.5901583", "0.58909476", "0.58799356", "0.5878846", "0.58678484", "0.58504", "0.5838508", "0.58270365", "0.58163786", "0.5801873", "0.58007425", "0.5799373", "0.57984823", "0.5787539", "0.57815963", "0.5776819", "0.577534", "0.5769758", "0.5760402", "0.5757484", "0.5750526", "0.57333094", "0.57253003", "0.5714013", "0.5705632", "0.57006633", "0.5683427", "0.5682265", "0.56803256", "0.5672895", "0.56718934", "0.5671704", "0.566189", "0.5661777", "0.5652495", "0.5650304", "0.5643997", "0.5643261", "0.5637949", "0.56345075", "0.56327105", "0.56216466", "0.56186765", "0.5612454", "0.5609392", "0.56081915", "0.56040967", "0.55966085", "0.55955786", "0.55929506", "0.55889463", "0.55827326", "0.557914", "0.55776733", "0.5564882", "0.5561116", "0.5557165", "0.55562526", "0.55553895", "0.5550775", "0.5545049", "0.5544769", "0.5542651", "0.5538216", "0.5538028", "0.5537906", "0.55366874", "0.5530848", "0.55251795", "0.5522304", "0.5521498", "0.5516886", "0.5515067", "0.55145514" ]
0.0
-1
Create/Update the sprite shape for an entity and add/update the entry for it in `self.entities_shapelist`
def update_shape_sprite(self, entity: Entity):
    shape_sprite: ShapeSprite = entity.shape_sprite

    if entity.id not in self.entities_shapelist:
        entity_shapelist = arcade.ShapeElementList()
        # we need to convert from general colours to arcade specific colours
        entity_shapelist.append(arcade.create_rectangles_filled_with_colors(
            shape_sprite.point_list,
            [COLOUR_MAP[x] for x in shape_sprite.color_list])
        )
    else:
        entity_shapelist = self.entities_shapelist[entity.id]

    entity_shapelist.center_x = shape_sprite.position_x
    entity_shapelist.center_y = SCREEN_HEIGHT - shape_sprite.position_y
    entity_shapelist.draw()

    self.entities_shapelist[entity.id] = entity_shapelist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_entity(self, entity: Entity):\n \n if entity.shape_sprite:\n return self.update_shape_sprite(entity)\n \n left = (entity.x - entity.half_width)\n right = (entity.x + entity.half_width)\n # because arcade 0 on y is the bottom of the screen not the top\n bottom = abs((entity.y + entity.half_height) - SCREEN_HEIGHT)\n # bottom = entity.y - entity.half_height - SCREEN_HEIGHT\n top = abs((entity.y - entity.half_height) - SCREEN_HEIGHT)\n # top = entity.y + entity.half_height - SCREEN_HEIGHT\n \n arcade.draw_lrtb_rectangle_filled(\n left = left,\n right = right,\n bottom = bottom,\n top = top,\n color = COLOUR_MAP[entity.base_colour],\n )", "def create_sprite(self, pos):\n group = pyglet.sprite.SpriteGroup(\n self.TEXTURE, gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA\n )\n texcoords = []\n for i in xrange(self.length + 1):\n texcoords.extend([\n self.TEXTURE.tex_coords[0], i,\n self.TEXTURE.tex_coords[3], i,\n ])\n count = 2 * (self.length + 1)\n verts = [0, 0] * count # set vertices later from body\n self.vlist = batch.add(\n count, gl.GL_TRIANGLE_STRIP, group,\n ('v2f', verts),\n ('t2f', texcoords)\n )", "def shapes(self, shape_list):\n for item in shape_list:\n item.store()\n shape_list_uuids = [item.uuid for item in shape_list]\n self.set_attribute('shapes', shape_list_uuids)", "def add_entity(self, ent):\n self.tiles[ent.position[x]][ent.position[y]].add_entity(ent)", "def __init__(self, entities):\n self._shape_to_ent = dict()\n self._ent_to_shapes = dict()\n for entity in entities:\n shapes = entity.shapes\n self._ent_to_shapes[entity] = shapes\n for shape in shapes:\n assert shape not in self._shape_to_ent, \\\n f\"shape {shape} appears in {entity} and \" \\\n f\"{self._shape_to_ent[shape]}\"\n self._shape_to_ent[shape] = entity", "def shapes(self, shapes):\n\n self.container['shapes'] = shapes", "def add_shape(self, spec):\n color_, shape_ = spec\n if shape_ is None:\n shape_ = self.random_shape()\n if color_ is None:\n color_ = self.random_color()\n x = shape.rand_pos()\n y = shape.rand_pos()\n return shape.SHAPE_IMPLS[shape_](x=x, y=y, color_=color_)", "def place_entity(entity, base, x, y):\n \n img = entity.copy().convert(\"RGBA\")\n\n # Get random angle for placement\n angle = random.randint(-ROTATION_RATE, ROTATION_RATE)\n img = img.rotate(angle, expand=1)\n\n # Placement\n base.paste(img, (x, y), img)", "def update_shape_vaos(self, instance, show):\n shape = self._shape(instance)\n\n shape_object_id = id(shape)\n if not shape_object_id in self._shape_vaos:\n self._shape_vaos[shape_object_id] = VertexArray({\n 'vertex_position': VertexBuffer.from_numpy(shape.verticies),\n 'texture_coords': VertexBuffer.from_numpy(shape.texture_coords),\n }, self.program.attributes)", "def process_spawned_event(self, event):\n self.sprites[event.id] = [event.point, event.sprite]\n self.img[event.point.y, event.point.x] = self.sprite_colors[event.sprite]", "def add_to_default_batch(self):\n\n '''\n self.shape = shared.batch.add(4, gl.GL_QUADS, None,\n ('v2f', (self.x, self.y,\n self.x + self.width, self.y,\n self.x + self.width, self.y + self.height,\n self.x, self.y + self.height)))\n \n numPoints = 50\n verts = []\n for i in range(numPoints):\n angle = math.radians(float(i)/numPoints * 360.0)\n x = self.radius*cos(angle) + self.x\n y = self.radius*sin(angle) + self.y\n verts += [int(x),int(y)]\n \n '''\n data = create_circle(self.x, self.y, self.radius, shared.batch)\n\n self.shape = shared.batch.add_indexed(data[0], data[1], data[2], data[3], data[4], data[5])\n\n #self.shape = 
shared.batch.add(numPoints, gl.GL_POLYGON, None,\n # ('v2f', verts))", "def draw(self, shape):\n shape.draw(shader=self.shader)", "def _add_full_entity(self, entity):\n marked_id = utils.get_peer_id(\n utils.get_input_peer(entity, allow_self=False), add_mark=True\n )\n try:\n old_entity = self._entities[marked_id]\n old_entity.__dict__.update(entity.__dict__) # Keep old references\n\n # Update must delete old username and phone\n username = getattr(old_entity, 'username', None)\n if username:\n del self._username_id[username.lower()]\n\n phone = getattr(old_entity, 'phone', None)\n if phone:\n del self._phone_id[phone]\n except KeyError:\n # Add new entity\n self._entities[marked_id] = entity\n\n # Always update username or phone if any\n username = getattr(entity, 'username', None)\n if username:\n self._username_id[username.lower()] = marked_id\n\n phone = getattr(entity, 'phone', None)\n if phone:\n self._username_id[phone] = marked_id", "def __init__(self, size:Point, **kwargs):\n PhysicsEntity.__init__(self, **kwargs)\n self.size = size\n self.collision_shape = to_collision_rect(self.size)", "def set_shape(self, shape):\n self._shape = self._shape.merge_with(shape)", "def update_counter(cls, value):\n SFFShape.shape_id = value", "def __init__(self,canvas=None,spritePath=defaultSpritePath,hitboxRadius=0,xPos=0,yPos=0):\n global entityCounter\n global registeredEntities\n self.ID=entityCounter\n entityCounter+=1\n registeredEntities[self.ID]=self \n \n #these variables deal with position and motion\n self.xPos=xPos #the x postion of the entity\n self.yPos=yPos #the y position of the entity\n self.xMomentum=0.0\n self.yMomentum=0.0\n self.faceHeading=0.0\n \n self.hitboxRadius=hitboxRadius\n \n #these variabls and other junk deal with drawing the sprite onscreen\n self.spritePath=spritePath#the path to the image that this entity instance will use\n self.spriteImageFile=(Image.open(self.spritePath)) #the image file that we'll manipulate mainly when doing rotations\n self.spriteImage = ImageTk.PhotoImage(self.spriteImageFile.rotate(self.faceHeading,expand=True)) #the thing that tkinter uses as an image to draw on a canvas\n\n #theres two spriteImage variables because of the weird way that you basically have to reload the image if you want to rotate it. 
its a weirdity with tkinter\n self.spriteOnCanvas=None #the variable that holds a refrence to the actual drawn-on-screen thingy that's actually on the canvas\n self.canvasIGetDrawnOn=None #the canvas that this instance of the entitiy class will have its sprite drawn on\n self.canvasIGetDrawnOnsWidth=0\n self.canvasIGetDrawnOnsHeight=0\n if (canvas!=None):\n self.setCanvas(canvas)\n \n #these variables deal with motion and rotation due to player interaction\n self.isAcceleratingForward=False", "def __init__(self, shape, ssize, pos=None):\n super(Obstacle, self).__init__()\n self.pos = pos or Vec2d(0, 0)\n self.shape = shape\n # image\n self.image = pygame.Surface(ssize).convert_alpha()\n self.color = pygame.Color(\"black\")\n self.ssize = ssize\n self.rect = pygame.Rect((0, 0), self.ssize)", "def draw_item(self):\r\n self.screen.blit(self.spawned_item, self.rect)", "def Place(self, ref, scent):\n coords = self.Map.layerSolid.GetXYByRef(ref)\n self[coords] = Pheromone(scent, coords)", "def add_row(self, shape, attributes=[]):\n if isinstance(shape, shapefile.shapefile._Shape):\n self.shapes.append(shape)\n self.__shapeHolder._shapes.append(shape)\n else:\n if self.shapeType in (1, 8, 11, 21, 25, 31):\n self.__shapeHolder.point(*shape)\n elif self.shapeType in (3, 13, 23):\n addShp = self.__shapeHolder.line(shape)\n else:\n self.__shapeHolder.poly(shape)\n\n self.shapes.append(self.__shapeHolder.shapes()[-1])\n self.records.append(self.addDefaults(attributes))\n self.__isBuilt = False", "def add_sprite(self, segment, name, x, y=0.0):\n sprite = sp.Sprite(name, x, y)\n segment.sprites.append(sprite)", "def redraw_all_shapes(self):\n\n for shape_id in self.variables.shape_ids:\n pixel_coords = self.get_vector_object(shape_id).image_coords\n if pixel_coords:\n new_canvas_coords = self.shape_image_coords_to_canvas_coords(shape_id)\n self.modify_existing_shape_using_canvas_coords(shape_id, new_canvas_coords, update_pixel_coords=False)", "def register_shapes():\n turtle.Screen().register_shape(\"saphire.gif\")\n turtle.Screen().register_shape(\"player_right.gif\")\n turtle.Screen().register_shape(\"player_left.gif\")\n turtle.Screen().register_shape(\"walls.gif\")", "def add_shape(self, shape):\n\n if isinstance(shape, Shape):\n self.shapes.append(shape)\n else:\n raise TypeError", "def create_wall_shape(self):\n self.shape_walls = arcade.ShapeElementList()\n self.shape_walls.center_x = 0\n self.shape_walls.center_y = 0\n self.shape_walls.angle = 0\n\n point_list = []\n color_list = []\n \n # create the walls into a single shape\n walls = self.game.walls\n for wall in walls:\n points = self.get_entity_dimensions(wall)\n point_list.append(points[0])\n point_list.append(points[1])\n point_list.append(points[2])\n point_list.append(points[3])\n \n # as we have 4 points\n for i in range(4):\n color_list.append(COLOUR_MAP[wall.base_colour])\n \n self.shape_walls.append(\n arcade.create_rectangles_filled_with_colors(point_list, color_list)\n )", "def paintShoes(self):\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"bodySize\"], \"shoes\", self.avatarConfiguration[\"shoes\"] + IMG_EXTENSION))\n self.newAvatarImage(imgPath, \"shoes\")", "def assignPointsToShapes(self):\n pointsCopy = self.mission['points'].copy()\n\n while len(pointsCopy):\n shape = []\n self.recursiveAddPointToShape(pointsCopy, [pointsCopy[0]], shape)\n shape.append(shape[0])\n self.mission['shapes'].append(shape)", "def 
modify_existing_shape_using_canvas_coords(self, shape_id, new_coords, update_pixel_coords=True):\n vector_object = self.get_vector_object(shape_id)\n if vector_object.type == SHAPE_TYPES.POINT:\n point_size = vector_object.point_size\n x1, y1 = (new_coords[0] - point_size), (new_coords[1] - point_size)\n x2, y2 = (new_coords[0] + point_size), (new_coords[1] + point_size)\n canvas_drawing_coords = (x1, y1, x2, y2)\n else:\n canvas_drawing_coords = tuple(new_coords)\n self.coords(shape_id, canvas_drawing_coords)\n if update_pixel_coords:\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, new_coords)", "def SetShape(self, *args):\n return _XCAFDoc.XCAFDoc_ShapeTool_SetShape(self, *args)", "def add_object(self, obj_data, obj_name, obj_orientation, qpmi, entity):\n self.objects.append((obj_data, obj_name, obj_orientation, qpmi, entity))\n if len(self.objects) == 1:\n self.set_default_brush()", "def _settle_shape(self, shape):\n if shape:\n for block in shape.blocks:\n self.array[block.row_position][block.column_position] = block\n self.remove_completed_lines()", "def implement_shape(self, shape):\n if self.flag == 0:\n for coord in INDICES:\n self.kill(coord)\n for coord in shape:\n self.givebirth(coord)", "def create_and_add_item_to_list(filename: str, scale: float, center_x: float, center_y: float, item_list: arcade.SpriteList) -> None:\n item = arcade.Sprite(filename, scale)\n item.center_x = center_x\n item.center_y = center_y\n item_list.append(item)", "def _create_shape(self, queryset, model, columns, filename):\n geo_field = geo_field_from_model(model, app_settings['GEOM_FIELD_NAME'])\n get_geom, geom_type, srid = info_from_geo_field(geo_field)\n\n if geom_type.upper() in (GeometryField.geom_type, GeometryCollectionField.geom_type):\n\n by_points, by_linestrings, multipoints, multilinestrings = self.split_bygeom(queryset, geom_getter=get_geom)\n\n for split_qs, split_geom_field in ((by_points, PointField),\n (by_linestrings, LineStringField),\n (multipoints, MultiPointField),\n (multilinestrings, MultiLineStringField)):\n if len(split_qs) == 0:\n continue\n split_geom_type = split_geom_field.geom_type\n shp_filepath = shape_write(split_qs, model, columns, get_geom, split_geom_type, srid)\n filename = '%s_%s' % (filename, split_geom_type.lower())\n self.layers[filename] = shp_filepath\n\n else:\n shp_filepath = shape_write(queryset, model, columns, get_geom, geom_type, srid)\n self.layers[filename] = shp_filepath", "def _change_shape(self,x,y,w,h):\n top = y \n left = x\n right = x + w\n bottom = y + h\n return top,right,bottom,left", "def SetShape(self, *args):\n return _XCAFDoc.XCAFDoc_ShapeMapTool_SetShape(self, *args)", "def _draw(self):\n\n # Draw a rectangle on the game's canvas\n self.sprite = self.canvas.create_rectangle(self.left, self.bottom,\n self.right, self.top, fill=self.color)", "def modify_existing_shape_using_image_coords(self, shape_id, image_coords):\n\n self.set_shape_pixel_coords(shape_id, image_coords)\n canvas_coords = self.image_coords_to_canvas_coords(image_coords)\n self.modify_existing_shape_using_canvas_coords(shape_id, canvas_coords, update_pixel_coords=False)", "def add_object(self, obj): # DEFINE OBJ!\n obj.spritesheet_width = self.spritesheet.size['width']\n obj.spritesheet_height = self.spritesheet.size['height']\n \n obj._layer_added(self)\n \n\n obj.buffer_index = len(self.objects)\n self.objects.append(obj)\n\n x = obj.x\n y = obj.y\n \n self.verts.extend(((x, y, 0.0), (x+obj.width, y, 0.0), (x+obj.width, y-obj.height, 0.0), (x, y-obj.height, 
0.0)))\n self.texcoords.extend(obj.uv_texture)\n self.norms.extend(((0, 0, -1), (0, 0, -1), (0, 0, -1), (0, 0, -1)))\n\n if pi3d.PLATFORM == pi3d.PLATFORM_PI:\n self.inds.append((self.a,self.b,self.c))\n self.inds.append((self.d,self.a,self.c))\n else:\n self.inds.extend((self.a,self.b,self.c))\n self.inds.extend((self.d,self.a,self.c))\n\n self.a += 4\n self.b += 4\n self.c += 4\n self.d += 4\n\n \n #~ return len(self.sprites)-1", "def update(self) -> None:\n self.all_sprites.update()", "def _set_state_coordinates(atomic_entity, width, height):\n state_entity = atomic_entity.get(\"children\")[0]\n parent_coor = atomic_entity[\"coordinates\"]\n state_entity[\"coordinates\"] = {\n \"x\": parent_coor[\"x\"] + (parent_coor[\"width\"] - width) / 2,\n \"y\": parent_coor[\"y\"] - (height / 2),\n \"width\": width,\n \"height\": height,\n }", "def new_shape(self):\n if self.current_shape is not None and not self.current_shape:\n return self.current_shape\n else:\n shape = Shape()\n self.shapes.append(shape)\n self.current_shape = shape\n return shape", "def add_entity(self, entity):\n self.append(entity)\n if self.size > 0:\n self.sort()", "def append(self, shape: Shape):\n self.shapes.append(shape)", "def execute(self, fp):\n fp.Shape = Part.makeCompound(fp.Shape.Solids)", "def place_obj(self):\r\n for pos in BOARD_POSITIONS:\r\n self.board[pos[0]][pos[1]] = Stone(color=self.state[pos[0]][pos[1]], pos=(pos[0], pos[1]))\r\n self.board[pos[0]][pos[1]].liberty = self.board[pos[0]][pos[1]].compute_liberty(self.state)", "def draw(self, shape):\r\n if not self.s_flg:\r\n opengles.glEnable(GL_SCISSOR_TEST)\r\n opengles.glScissor(ctypes.c_int(int(0)), ctypes.c_int(self.y0),\r\n ctypes.c_int(self.ix), ctypes.c_int(1))\r\n self.s_flg = True\r\n shape.draw(shader=self.shader)", "def ScaleShape(shape, scale_x, scale_y):\n for i, pt in enumerate(shape.points):\n x, y = pt\n shape.points[i] = [scale_x * x, scale_y * y]", "def add_shape_rel(self, spec, oth_shape, relation, relation_dir):\n color_, shape_ = spec\n if shape_ is None:\n shape_ = self.random_shape()\n if color_ is None:\n color_ = self.random_color()\n if relation == 0:\n new_y = shape.rand_pos()\n if relation_dir == 0:\n # Shape must be LEFT of oth shape\n new_x = random.randint(c.X_MIN, oth_shape.x - c.BUFFER)\n else:\n # Shape RIGHT of oth shape\n new_x = random.randint(oth_shape.x + c.BUFFER, c.X_MAX)\n else:\n new_x = shape.rand_pos()\n if relation_dir == 0:\n # BELOW (remember y coords reversed)\n new_y = random.randint(oth_shape.y + c.BUFFER, c.X_MAX)\n else:\n # ABOVE\n new_y = random.randint(c.X_MIN, oth_shape.y - c.BUFFER)\n return shape.SHAPE_IMPLS[shape_](x=new_x, y=new_y, color_=color_)", "def stamp(self):\n screen = self.screen\n shape = screen._shapes[self.Myturtle.shapeIndex]\n ttype = shape._type\n tshape = shape._data\n if ttype == \"polygon\":\n stitem = screen._createpoly()\n if self._resizemode == \"noresize\": w = 1\n elif self._resizemode == \"auto\": w = self._pensize\n else: w =self._outlinewidth\n shape = self._polytrafo(self._getshapepoly(tshape))\n fc, oc = self._fillcolor, self._pencolor\n screen._drawpoly(stitem, shape, fill=fc, outline=oc,\n width=w, top=True)\n elif ttype == \"image\":\n stitem = screen._createimage(\"\")\n screen._drawimage(stitem, self._position, tshape)\n elif ttype == \"compound\":\n stitem = []\n for element in tshape:\n item = screen._createpoly()\n stitem.append(item)\n stitem = tuple(stitem)\n for item, (poly, fc, oc) in zip(stitem, tshape):\n poly = self._polytrafo(self._getshapepoly(poly, 
True))\n screen._drawpoly(item, poly, fill=self._cc(fc),\n outline=self._cc(oc), width=self._outlinewidth, top=True)\n self.stampItems.append(stitem)\n self.undobuffer.push((\"stamp\", stitem))\n return stitem", "def add_entity(self, entity_obj):\n if (\n type(entity_obj) is not dict\n or \"entity_id\" not in entity_obj\n or \"mentions\" not in entity_obj\n ):\n raise ValueError(\n \"The input to update_entity needs to be a dictionary with an entity_id key and mentions key as \"\n \"you are replacing the entity information in bulk.\"\n )\n try:\n ent = EntityObj(\n entity_id=entity_obj[\"entity_id\"],\n mentions=entity_obj[\"mentions\"],\n title=entity_obj.get(\"title\", entity_obj[\"entity_id\"]),\n description=entity_obj.get(\"description\", \"\"),\n types=entity_obj.get(\"types\", {}),\n relations=entity_obj.get(\"relations\", []),\n )\n except ValidationError as e:\n print(e.json())\n raise e\n # We assume this is a new entity\n if self._entity_symbols.qid_exists(ent.entity_id):\n raise ValueError(\n f\"The entity {ent.entity_id} already exists. Please call update_entity instead.\"\n )\n # Add type systems of type_sys -> QID -> list of type names\n for type_sys in ent.types:\n if type_sys not in self._type_systems:\n raise ValueError(\n f\"Error {entity_obj}. When adding a new entity, you must use the same type system. \"\n f\"We don't support new type systems.\"\n )\n # Add kg relations QID -> relation -> list of object QIDs\n parsed_rels = {}\n for rel_pair in ent.relations:\n if \"relation\" not in rel_pair or \"object\" not in rel_pair:\n raise ValueError(\n \"For each value in relations, it must be a JSON with keys relation and object\"\n )\n if (\n self._kg_symbols is not None\n and rel_pair[\"relation\"] not in self._kg_symbols.get_all_relations()\n ):\n raise ValueError(\n f\"Error {entity_obj}. When adding a new entity, you must use the same set of relations. 
\"\n f\"We don't support new relations.\"\n )\n if rel_pair[\"relation\"] not in parsed_rels:\n parsed_rels[rel_pair[\"relation\"]] = []\n parsed_rels[rel_pair[\"relation\"]].append(rel_pair[\"object\"])\n # Lower case mentions for mention extraction\n mentions = [\n [get_lnrm(men[0], strip=True, lower=True), men[1]] for men in ent.mentions\n ]\n self._entity_symbols.add_entity(\n ent.entity_id, mentions, ent.title, ent.description\n )\n for type_sys in self._type_systems:\n self._type_systems[type_sys].add_entity(\n ent.entity_id, ent.types.get(type_sys, [])\n )\n if self._kg_symbols is not None:\n self._kg_symbols.add_entity(ent.entity_id, parsed_rels)", "def __init__(self,canvas=0,spritePath=bulletSpritePath,hitboxRadius=bulletHitboxRadius,xPos=0,yPos=0,entityThatCreatedMe=None):\n Entity.__init__(self,canvas,spritePath,xPos=xPos,yPos=yPos)\n self.creationTime=time.time()#a time stamp for exactly when the bullet was spawned, this is used in preventing the bullet from traveling forever\n self.entityThatCreatedMe=entityThatCreatedMe#a refrence back to the ship that fired this bullet\n #registering this bullet in a big list o' bullets\n self.bulletID=Bullet.bulletCounter\n Bullet.bulletCounter+=1\n Bullet.registeredBullets[self.bulletID]=self\n Bullet.bulletSound.play()", "def add_sprite(self, path):\r\n sprite = Sprite(path=path, config=self.config)\r\n self.sprites.append(sprite)", "def _create_fleet(self):\n alien = Alien(self) # used for calculations, NOT part of fleet\n # <Alien sprite(in 0 groups)>\n\n # Get dimensions for ship & alien\n ship_height = self.ship.rect.height\n alien_width, alien_height = alien.rect.size # (60, 58)\n \n # find available space for aliens to fit on screen\n available_space_y = (self.settings.screen_height - (3 * alien_height) - ship_height)\n available_space_x = self.settings.screen_width - ( 2 * alien_width )\n # available_space_y = (800) - (3 * 58) - 48 = 578\n # available_space_x = 1200 - (2 * 60) = 1080\n\n # determine total number of aliens per row & total number of rows \n number_aliens_x = available_space_x // ( 2 * alien_width )\n number_rows = available_space_y // ( 2 * alien_height )\n # number_aliens_x = 1080 // (2 * 60) = 9\n # number_rows = 578 // (2 * 58) = 4\n\n # Create rows of aliens\n for row_number in range(number_rows):\n for alien_number in range(number_aliens_x):\n # Fill row with aliens\n self._create_alien(alien_number, row_number )\n\n # rect = <rect(x, y, width, height)> <rect(180, 58, 60, 58)>", "def reDraw(self):\n self.canvasIGetDrawnOn.delete(self.spriteOnCanvas)\n self.spriteImage = ImageTk.PhotoImage(self.spriteImageFile.rotate(self.faceHeading, expand=True))\n self.spriteOnCanvas=self.canvasIGetDrawnOn.create_image(self.xPos,self.yPos,image=self.spriteImage)", "def draw(self):\n self.ball_sprite.draw()", "def update(self):\n if self.__first:\n self.__first = False\n self.__map_data = self.__gui_handler.get_map_data()\n self.__next_data = self.__gui_handler.get_entities()\n labels = []\n\n # Découverte du terrain\n for terrain in SimUtils.get_terrains():\n self.__terrain.append(terrain.color)\n labels.append(StatItem(terrain.name, \"\", terrain.color))\n\n # Tri lexicographique des labels.\n labels.sort(key=lambda stat: stat._name)\n # Ajout des labels de terrain\n for label in labels:\n self.__gui_handler.add_stat(label)\n\n # Remplissage de la carte avec les terrains.\n for i in range(0, self.__width):\n for j in range(0, self.__height):\n # Affichage du point.\n color = 
QColor(self.__terrain[self.__map_data.get_terrain_type(i,j)])\n self.__image.setPixel(i,j,color.rgb())\n\n # Permet de faire le tri entre les entités déjà rencontrées et les\n # autres.\n entity_types = {}\n\n # Liste des futurs labels\n labels = []\n\n # Découverte des entités - affectation des couleurs\n for entity in self.__next_data:\n # Ajout des labels de couleur pour les entités\n if not entity_types.has_key(entity.__name__):\n entity_types[entity.__name__] = True\n\n for label, color in entity._labels.iteritems():\n labels.append(StatItem(label, \"\", color))\n\n # Affichage de l'entité.\n self.__image.setPixel(entity._x, entity._y, QColor(entity._color).rgb())\n self.positions[id(entity)] = [entity._x, entity._y]\n\n # Tri lexicographique des labels.\n labels.sort(key=lambda stat: stat._name)\n\n for label in labels:\n self.__gui_handler.add_stat(label)\n else:\n # Mise à jour du rendu\n for entity in self.__next_data:\n # Cas d'une entité désactivée (morte)\n remove_entity = not entity._is_active()\n if id(entity) not in self.positions:\n # Ajout de l'entité en cours de simulation\n self.__image.setPixel(entity._x, entity._y, QColor(entity._color).rgb())\n self.positions[id(entity)] = [entity._x,entity._y]\n\n # Le simulateur demande de repeindre l'entité\n old_points = self.positions[id(entity)]\n\n if not remove_entity:\n self.positions[id(entity)] = [entity._x, entity._y]\n\n # On remet la couleur du terrain.\n color = QColor(self.__terrain[self.__map_data.get_terrain_type(old_points[0], old_points[1])])\n self.__image.setPixel(old_points[0], old_points[1], color.rgb())\n\n if not remove_entity:\n # Ajout des paramètres de setPixel dans une liste pour être ploté après.\n self.__image.setPixel(entity._x, entity._y, QColor(entity._color).rgb())", "def _place_new_obj(self, (screen_width, screen_height)):\n old_tree = self.objects.get()\n new_x = (-old_tree.position[0]) + old_tree.max_width*2 + screen_width\n another_tree = Grass((new_x, screen_height), self.width, self.height)\n self.objects.put(another_tree)", "def draw(hyp):\r\n print 'g.createShape(',hyp.getAttList(),')'\r\n print type(hyp.getAttList())\r\n g.createShape(hyp.getAttList())", "def update_shape(source, target):\n\n # clean uvs on mesh nodes\n clean_uvs_sets(target)\n\n # get attributes names\n attributes = get_shape_type_attributes(source)\n\n logger.debug(\"Updating shape: {} using --> {}\".format(target, source))\n\n # updates the shape\n cmds.connectAttr(\"{}.{}\".format(source, attributes[\"output\"]),\n \"{}.{}\".format(target, attributes[\"input\"]),\n force=True)\n\n # forces shape evaluation to achieve the update\n cmds.dgeval(\"{}.{}\".format(target, attributes[\"output\"]))\n\n # finish shape update\n cmds.disconnectAttr(\"{}.{}\".format(source, attributes[\"output\"]),\n \"{}.{}\".format(target, attributes[\"input\"]))", "def _update_vertices(self):\n raise NotImplementedError(\"_update_vertices must be defined\"\n \"for every ShapeBase subclass\")", "def add_dynamic_obj(self, ref_frame, obj_name, pos, ori, size,\n touch_links=None):\n if not isinstance(pos, list):\n raise TypeError('pos should be a list')\n if not isinstance(ori, list):\n raise TypeError('ori should be a list')\n\n pose = pos + ori\n pose = conversions.list_to_pose(pose)\n pose_stamped = PoseStamped()\n pose_stamped.header.frame_id = ref_frame\n pose_stamped.pose = pose\n\n if isinstance(size, float):\n size = (size, size, size)\n elif isinstance(size, list) or isinstance(size, tuple):\n if len(size) != 3:\n raise ValueError('If size 
is a list or tuple, its length'\n ' should be 3 for a box')\n else:\n raise TypeError('size should be a float number, a 3-element list '\n 'or a 3-element tuple for a box')\n if isinstance(size, list):\n size = tuple(size)\n if touch_links is None:\n self.scene.attach_box(ref_frame, obj_name, pose_stamped, size)\n else:\n # moveit ignores collisions between box and links in touch_links\n self.scene.attach_box(ref_frame, obj_name,\n pose_stamped, size,\n touch_links=touch_links)\n obj_dict, obj_adict = self.get_objects()\n success = False\n if obj_name in obj_adict.keys():\n success = True\n return success", "def update_entities(self):\n raise NotImplementedError()", "def __init__(self, mapfile, xpos, zpos, emap, width=10.0, depth=10.0, height=10.0, name=\"building\", draw_details=None, yoff=0.0, scheme=None):\r\n self.xpos = xpos\r\n self.zpos = zpos\r\n self.width = width\r\n self.depth = depth\r\n self.height = height\r\n self.name = name\r\n self.ceilingthickness = 1.0\r\n self.walls = []\r\n\r\n if scheme == None:\r\n self.scheme = Building.baseScheme\r\n else:\r\n self.scheme = scheme\r\n\r\n # We don't have to be rigorous here, this should only be a draw_details or an iterable of draw_details.\r\n if hasattr(draw_details, \"__getitem__\") or hasattr(draw_details, \"__iter__\"):\r\n assert (len(draw_details) == self.scheme[\"#models\"])\r\n self.details = draw_details\r\n else:\r\n self.details = [draw_details for x in range(self.scheme[\"#models\"])]\r\n # having a method like this allows draw details to be set later\r\n\r\n self.yoff = yoff\r\n\r\n self.model = [MergeShape(name=name+\".\"+str(x)) for x in range(self.scheme[\"#models\"])]\r\n\r\n if mapfile[0] != '/':\r\n mapfile = sys.path[0] + '/' + mapfile\r\n print(\"Loading building map ...\", mapfile)\r\n\r\n im = Image.open(mapfile)\r\n im = ImageOps.invert(im)\r\n ix,iy = im.size\r\n\r\n print(\"image size\", ix, \",\", iy)\r\n\r\n startx = xpos - ix / 2 * width\r\n starty = zpos - ix / 2 * depth\r\n\r\n yoff += emap.calcHeight(-xpos,-zpos)\r\n\r\n if not im.mode == \"P\":\r\n im = im.convert('P', palette=Image.ADAPTIVE)\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\r\n pixels = im.load()\r\n\r\n for y in range(1,iy-1):\r\n print(\".\", end='')\r\n for x in range(1,ix-1):\r\n colour = pixels[x,y]\r\n\r\n if x == 1:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x-1,y], \"edge\"), wallfunc=self.west_wall, ceilingedgefunc=self.west_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x-1,y]), wallfunc=self.west_wall, ceilingedgefunc=self.west_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if x == ix-2:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x+1,y], \"edge\"), wallfunc=self.east_wall, ceilingedgefunc=self.east_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x+1,y]), wallfunc=self.east_wall, ceilingedgefunc=self.east_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if y == 1:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y-1], \"edge\"), wallfunc=self.south_wall, ceilingedgefunc=self.south_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y-1]), wallfunc=self.south_wall, ceilingedgefunc=self.south_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if y == iy-2:\r\n 
self._executeScheme(x, y, startx, starty, (colour, pixels[x, y+1], \"edge\"), wallfunc=self.north_wall, ceilingedgefunc=self.north_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y+1]), wallfunc=self.north_wall, ceilingedgefunc=self.north_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n self._executeScheme(x, y, startx, starty, (colour, None), wallfunc=None, ceilingedgefunc=None, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n self.set_draw_details(self.details) # after models created otherwise\r\n # details lost by merging\r", "def draw(hyp):\n print 'g.createShape(',hyp.getAttList(),')'\n print type(hyp.getAttList())\n g.createShape(hyp.getAttList())", "def NewShape(self, *args):\n return _XCAFDoc.XCAFDoc_ShapeTool_NewShape(self, *args)", "def on_draw(self):\n\n # Clear the screen and start drawing\n arcade.start_render()\n\n # Draw the rectangles\n for shape in self.shapes:\n shape.draw()", "def __init__(self,canvas,xPos=0,yPos=0,size=2):\n self.size=size#this denotes if the rock is small(0) medium(1) or large(2)\n if self.size==2:\n #large rock\n self.spritePath=rockLargeSpritePath\n self.hitboxRadius=rockLargeHitboxRadius\n self.pointValue=rockLargePointValue\n elif self.size==1:\n #medium\n self.spritePath=rockMediumSpritePath\n self.hitboxRadius=rockMediumHitboxRadius\n self.pointValue=rockMediumPointValue\n elif self.size==0:\n #small\n self.spritePath=rockSmallSpritePath\n self.hitboxRadius=rockSmallHitboxRadius\n self.pointValue=rockSmallPointValue\n Entity.__init__(self,canvas,self.spritePath,self.hitboxRadius,xPos,yPos)#call parent constructior\n self.faceHeading=random.randint(0,360)#pick a random direction to start with\n self.accelerateForwards(movementSpeed=random.uniform(*rockSpeedRange))\n #register this rock in a big old list o' rocks\n self.rockID=Rock.rockCounter\n Rock.rockCounter+=1\n Rock.registeredRocks[self.rockID]=self\n self.reDraw()", "def model_geom_fr_scratch_2d(): \n\n geom = [[],[]]\n geom2 = [[],[]]\n\n obj = object25d()\n\n #add new geom and auto increment the ids\n polys = [(1,2,3), (2,3,4) ]\n pts = [(1,1,1),(0,1,1),(-1,-1,1),(2,-2,1)]\n geom = obj.insert_polygons(polys, pts, geom=geom) \n\n \n polys = [(1,2,3,4) ]\n pts = [(4,-4.3,-3),(1.5,-2.5,-2.1),(-2,2,-4),(4,-4.2,1)]\n geom2 = obj.insert_polygons(polys, pts, geom=geom2) \n\n # use insert to add geom to object \n obj.insert(geom) \n obj.insert(geom2) \n \n # see what we have done, or not done \n obj.show() \n\n obj.save(\"3d_obj/foo.obj\")", "def paintShirt(self):\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"bodySize\"], self.avatarConfiguration[\"typeShirt\"]+\"_shirt\", self.avatarConfiguration[\"shirt\"] + IMG_EXTENSION))\n self.newAvatarImage(imgPath, \"shirt\")", "def add(self, entity):\n self.entities.add(entity)\n return entity", "def new(self):\n #groups for drawing\n self.moving_sprites = pg.sprite.LayeredUpdates() \n self.static_sprites = pg.sprite.LayeredUpdates()\n #other groups\n self.walls = pg.sprite.Group()\n self.teleports = pg.sprite.Group() \n self.win = pg.sprite.Group() \n self.threat = pg.sprite.Group()\n self.hearts= pg.sprite.Group()\n \n for tile_object in self.map.tmxdata.objects:\n if tile_object.name == \"player\":\n self.player = Player(self, tile_object.x, tile_object.y)\n if tile_object.name == \"monster\":\n self.monster = Monster(self, tile_object.x, 
tile_object.y)\n if tile_object.name == \"wall\":\n Obstacle(self, tile_object.x, tile_object.y, tile_object.width, tile_object.height)\n if tile_object.name == \"mirror\":\n Mirror(self, tile_object.x, tile_object.y, tile_object.width, tile_object.height, self.destinations)\n if tile_object.name == \"pentagram\":\n self.goal=Pentagram(self, tile_object.x, tile_object.y, tile_object.width, tile_object.height)\n\n self.camera = Camera(self.map.width, self.map.height)\n\n #static sprites\n self.flashlight=Flashlight(self, int(WIDTH/2), int(HEIGHT/2))\n self.darkness=Darkness(self, int(WIDTH/2), int(HEIGHT/2))\n if self.minimap_name != None:\n self.minimap=Minimap(self, self.minimap_name)\n for i in range(int(PLAYERHEALTH/10)):\n Heart(self, 726-37*(2-i), 20)\n self.battery= Battery(self, 726, 52)\n self.draw_debug = False\n\n self.teleport_list=[]\n for tele in self.teleports:\n self.teleport_list.append(tele)", "def _update_well_meta(self, pos):\n self.well_meta['well']['images'] = [{'path': self.positions[pos]['name']}]\n self.store[self.rows[0]][self.columns[pos]].attrs.put(self.well_meta)", "def add_row(self, shape, attributes):\n if isinstance(shape, shapefile.shapefile._Shape):\n self.w._shapes.append(shape)\n else:\n if self.shapeType in (1, 8, 11, 21, 25, 31):\n self.w.point(*shape)\n elif self.shapeType in (3, 13, 23):\n self.w.line(shape)\n else:\n self.w.poly(shape)\n self.w.record(*attributes)", "def create(self, pygame):\n\n white = (255,255,255)\n self.obstacle_img = pygame.image.load(\"./Images/Obstacle.png\").convert()\n self.obstacle_img.set_colorkey(white)\n\n for i in range(8):\n self.random_objects.append(pygame.image.load(\"./Images/Object{}.png\".format(i+1)).convert())\n # self.random_objects[i].set_colorkey(white)", "def put_fireitem(self, x, y):\n cell_size = self.map.get_cell_size()\n obj = FireItem(parent=self.map,\n style={\n 'width': cell_size,\n 'height': cell_size,\n 'z-index': layers['object'] }\n )\n def _on_eat(character):\n self.audio.play('kan.wav')\n # Increment the bomb power of this character\n character.bomb_power += 1\n\n make_breakable(self, obj)\n make_item(self, obj, on_eat=_on_eat)\n self.map.add_node(obj, x, y)", "def cli(ctx, entity, params={}):\n return ctx.gi.entity.add_entity(entity, params=params)", "def move_to(self, entity, location):\n y, x = location\n if not y in range(self.size) or not x in range(self.size):\n return\n y, x = entity.location\n self.grid[y][x].contents.remove(entity)\n entity.location = location\n y, x = location\n self.grid[y][x].contents.append(entity)\n for ent in self.grid[y][x].contents:\n try:\n if not ent.player_enter_callback is None:\n ent.player_enter_callback(ent)\n except AttributeError:\n pass", "def draw(self):\n self.screen.fill((0,51,102))\n # get the new drawables\n self.drawables = (self.game_model.get_background_drawables()\n + self.game_model.get_plane_drawables()\n + self.game_model.get_bullet_drawables()\n + self.game_model.get_enemy_drawables())\n for d in self.drawables:\n rect = d.get_rect()\n surf = d.get_surface()\n surf.set_colorkey((255,255,255))\n self.screen.blit(surf, rect)", "def update_entity(self, entity_obj):\n if (\n type(entity_obj) is not dict\n or \"entity_id\" not in entity_obj\n or \"mentions\" not in entity_obj\n ):\n raise ValueError(\n \"The input to update_entity needs to be a dictionary with an entity_id key and mentions key as \"\n \"you are replacing the entity information in bulk.\"\n )\n if not self._entity_symbols.qid_exists(entity_obj[\"entity_id\"]):\n raise 
ValueError(f\"The entity {entity_obj['entity_id']} is not in our dump\")\n try:\n ent = EntityObj(\n entity_id=entity_obj[\"entity_id\"],\n mentions=entity_obj[\"mentions\"],\n title=entity_obj.get(\"title\", entity_obj[\"entity_id\"]),\n types=entity_obj.get(\"types\", {}),\n relations=entity_obj.get(\"relations\", []),\n )\n except ValidationError as e:\n print(e.json())\n raise e\n # Update mentions\n for men in self.get_mentions(ent.entity_id):\n self._entity_symbols.remove_alias(ent.entity_id, men)\n for men in ent.mentions:\n # Lower case mentions for mention extraction\n men = [get_lnrm(men[0], strip=True, lower=True), men[1]]\n self._entity_symbols.add_alias(ent.entity_id, men)\n # Update title\n self._entity_symbols.set_title(ent.entity_id, ent.title)\n # Update types\n for type_sys in self._type_systems:\n for typename in self._type_systems[type_sys].get_types(ent.entity_id):\n self._type_systems[type_sys].remove_type(ent.entity_id, typename)\n for type_sys in ent.types:\n for typename in ent.types[type_sys]:\n self._type_systems[type_sys].add_type(ent.entity_id, typename)\n # Update KG\n if self._kg_symbols is not None:\n for rel in self._kg_symbols.get_relations(ent.entity_id):\n for qid2 in self._kg_symbols.get_connections_by_relation(\n ent.entity_id, rel\n ):\n self._kg_symbols.remove_kg(ent.entity_id, rel, qid2)\n for rel_pair in ent.relations:\n self._kg_symbols.add_kg(\n ent.entity_id, rel_pair[\"relation\"], rel_pair[\"object\"]\n )", "def __init__(self, i, j):\n pygame.sprite.Sprite.__init__(self)\n #self.image = pygame.Surface([30,30])\n #self.image.fill(self.wallColor)\n self.image = pygame.image.load('stone_wall.png').convert_alpha()\n self.pos = (i*30,j*30,)\n self.rect = pygame.Rect(i*30,j*30,30,30)\n self._layer = 2", "def add_shape(self,shape):\n if not isinstance(shape,_Poly3Profile):\n raise TypeError('add_elevation requires an _Elevation as input, not ' + str(type(shape)))\n self.shapes.append(shape)", "def set_sprite(self, image):\n self.current_sprite = image\n self.draw_alpha()", "def update(self, *args, **kwargs):\n attributes = [\"id\", \"size\", \"x\", \"y\"]\n if len(args) > 0:\n for i in range(len(args)):\n setattr(self, attributes[i], args[i])\n else:\n self.id = kwargs.get(\"id\", self.id)\n self.size = kwargs.get(\"size\", self.size)\n self.x = kwargs.get(\"x\", self.x)\n self.y = kwargs.get(\"y\", self.y)", "def on_draw(self):\r\n\r\n \r\n # clear the screen to begin drawing\r\n arcade.start_render()\r\n\r\n background = arcade.load_texture(\"gala.png\")\r\n arcade.draw_texture_rectangle(SCREEN_WIDTH/2, SCREEN_HEIGHT/2,SCREEN_WIDTH , SCREEN_HEIGHT, background) \r\n \r\n\r\n for asteriod in self.rocks:\r\n asteriod.draw()\r\n \r\n # for asteriod in self.rockss:\r\n # asteriod.draw()\r\n\r\n # for asteriod in self.rocksss:\r\n # asteriod.draw() \r\n \r\n for bullet in self.bullets:\r\n bullet.draw()\r\n \r\n \r\n self.ship.draw()\r\n \r\n \r\n # TODO: draw each object\r", "def perform_create(self, serializer):\n imgs = []\n for image_field, hash_field, suffix in self.image_fields:\n if serializer.validated_data.get(image_field):\n img_url = serializer.validated_data[image_field]\n img, hash_ = image_from_url(img_url)\n # Store img for `post_save` where we have access to the pk so\n # we can save img in appropriate directory.\n imgs.append((suffix, img, hash_))\n serializer.validated_data[hash_field] = hash_\n elif ((serializer.validated_data.get('type') or\n (serializer.instance and\n getattr(serializer.instance, 'type', None))) ==\n 
feed.COLLECTION_PROMO):\n # Remove background images for promo collections.\n serializer.validated_data[hash_field] = None\n if image_field in serializer.validated_data:\n del serializer.validated_data[image_field]\n\n obj = serializer.save()\n\n for suffix, image, hash_ in imgs:\n if image:\n i = Image.open(image)\n path = obj.image_path(suffix)\n with public_storage.open(path, 'wb') as f:\n i.save(f, 'png')\n pngcrush_image.delay(path, set_modified_on=[obj])", "def draw_me(self):\r\n\t\tself.image.fill((100, 200, 100))\r\n\t\tif self.active: pg.draw.rect(self.image, (100, 100, 200), self.frame, 3) #if active => draw frame around selected entity width 3\r\n\t\tself.display_surface.blit(self.image, self.rect)", "def get_new(self, env):\n for sprite in env.new_sprites:\n if not isinstance(sprite, Dirt):\n if not isinstance(sprite, tako.Tako):\n self.widget_sprites.add(sprite)\n else:\n self.all_sprites.add(sprite)\n env.new_sprites.remove(sprite)", "def perform_create(self, serializer):\n serializer.save(created_by=self.request.user,\n modified_by=self.request.user,\n area=self.get_poly_obj())", "def _drawturtle(self):\n screen = self.screen\n shape = screen._shapes[self.Myturtle.shapeIndex]\n ttype = shape._type\n titem = self.Myturtle._item\n if self._shown and screen._updatecounter == 0 and screen._tracing > 0:\n self._hidden_from_screen = False\n tshape = shape._data\n if ttype == \"polygon\":\n if self._resizemode == \"noresize\": w = 1\n elif self._resizemode == \"auto\": w = self._pensize\n else: w =self._outlinewidth\n shape = self._polytrafo(self._getshapepoly(tshape))\n fc, oc = self._fillcolor, self._pencolor\n screen._drawpoly(titem, shape, fill=fc, outline=oc,\n width=w, top=True)\n elif ttype == \"image\":\n screen._drawimage(titem, self._position, tshape)\n elif ttype == \"compound\":\n for item, (poly, fc, oc) in zip(titem, tshape):\n poly = self._polytrafo(self._getshapepoly(poly, True))\n screen._drawpoly(item, poly, fill=self._cc(fc),\n outline=self._cc(oc), width=self._outlinewidth, top=True)\n else:\n if self._hidden_from_screen:\n return\n if ttype == \"polygon\":\n screen._drawpoly(titem, ((0, 0), (0, 0), (0, 0)), \"\", \"\")\n elif ttype == \"image\":\n screen._drawimage(titem, self._position,\n screen._shapes[\"blank\"]._data)\n elif ttype == \"compound\":\n for item in titem:\n screen._drawpoly(item, ((0, 0), (0, 0), (0, 0)), \"\", \"\")\n self._hidden_from_screen = True", "def _createShip(self):\n self._ship=Ship()", "def ModifiedShape(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_RemoveLocations_ModifiedShape(self, *args)", "def add_np_entity(self, entity):\n if entity._type == Genre.HUMAN:\n self._entities['humans'].append(entity)\n\n elif entity._type == Genre.COP:\n self._entities['cops'].append(entity)\n\n elif entity._type == Genre.BERZERK:\n self._entities['berzerks'].append(entity)\n\n if entity not in self._entities['all']:\n self._entities['all'].append(entity)", "def update_entity_list(self):\r\n\r\n list_sizer = self.scroll_entity_list.GetSizer()\r\n number_of_ctrls = self.get_number_of_entity_ctrls()\r\n number_of_entities = self.get_number_of_entities()\r\n difference = number_of_ctrls - number_of_entities\r\n if difference > 0:\r\n for unused_temp in range(difference):\r\n redundant_ctrl = self.entity_ctrl_list.pop()\r\n redundant_ctrl.Destroy()\r\n for entity_index in range(number_of_entities):\r\n entity = self.entities[entity_index]\r\n if entity_index + 1 > number_of_ctrls:\r\n self.add_entity_ctrl(entity)\r\n else:\r\n # set the entity on 
the existing control\r\n self.entity_ctrl_list[entity_index].set_entity(\r\n entity\r\n )\r\n list_sizer.Layout()", "def add(self, jobShape):\n with self.lock:\n self.jobShapes.append(jobShape)", "def add_shape_shing(shing_set: set, args: argparse.Namespace):\n shing_set.add(TrackShape.LOOP.value) if args.shape == 1 else shing_set.add(TrackShape.CURVE.value)", "def load_sprites(self):\n self.widget_sprites = pygame.sprite.Group()\n env = self.env_list[self.current_env]\n for x in range(env.size):\n for y in range(env.size):\n if type(env.garden_map[y][x]) != tako.Tako:\n if type(env.garden_map[y][x]) != Dirt:\n self.widget_sprites.add(env.garden_map[y][x])\n env.new_sprites = pygame.sprite.Group()\n self.all_sprites = pygame.sprite.Group()\n for tak in env.tako_list:\n self.all_sprites.add(tak)\n for sprite in self.widget_sprites:\n self.all_sprites.add(sprite)\n if self.cam_pos != [0, 0]:\n for spr in self.all_sprites:\n spr.update_rect()\n spr.move_rect(-self.cam_pos[0], -self.cam_pos[1])", "def create_and_add_horiontal_walls_to_list(row_start: int, row_end: int, y: int, wall_list: arcade.SpriteList) -> None:\n #loop creation of wall sprites\n for x in range(row_start * wall_size, row_end * wall_size, wall_size):\n wall = arcade.Sprite(\":resources:images/tiles/boxCrate_double.png\", wall_scaling)\n wall.left = x\n wall.bottom = y * wall_size\n wall_list.append(wall)", "def place_object(self, thing):\n color = [i * 255 for i in thing.color.rgb]\n size = (20, 20)\n if thing.name == \"luna\":\n size = (5, 5)\n if self.is_visible(thing.position, max(size)):\n position = self.get_position(thing.position, size)\n pygame.draw.ellipse(self.screen, color, (position, size))" ]
[ "0.61834466", "0.58497405", "0.5760513", "0.5680979", "0.55177814", "0.5469176", "0.5436238", "0.54036695", "0.5392178", "0.5378375", "0.53718156", "0.53654325", "0.53457034", "0.5342032", "0.5310451", "0.5253857", "0.5238692", "0.52258265", "0.5207183", "0.52023184", "0.5193018", "0.5178425", "0.5167262", "0.51654625", "0.5156848", "0.51563925", "0.5141927", "0.512304", "0.5112747", "0.5104402", "0.508101", "0.5060354", "0.5040272", "0.5039671", "0.50372183", "0.50344515", "0.50263715", "0.5014669", "0.50092775", "0.50050586", "0.5004246", "0.49975502", "0.49927694", "0.49875948", "0.49868426", "0.4978789", "0.49719185", "0.49701142", "0.49672383", "0.4947747", "0.49354595", "0.49315402", "0.49299392", "0.4916903", "0.49106926", "0.49101788", "0.49091777", "0.49047032", "0.48992", "0.48782668", "0.48745805", "0.4873973", "0.4863438", "0.4853844", "0.48501876", "0.48184958", "0.48184296", "0.48146397", "0.47958836", "0.47935835", "0.478631", "0.4782299", "0.4775707", "0.477467", "0.47711924", "0.47706434", "0.47673517", "0.4766413", "0.47659332", "0.4763619", "0.4748275", "0.4744385", "0.47440952", "0.47418463", "0.47381893", "0.47345963", "0.472719", "0.47266474", "0.47263885", "0.47186306", "0.4714108", "0.4713293", "0.4712867", "0.4700475", "0.47002357", "0.4694418", "0.46941102", "0.46918556", "0.46892473", "0.4687441" ]
0.82262385
0
Draw the entity as a block, converting the y pixels values for 0,0 being bottom left not top left
def draw_entity(self, entity: Entity): if entity.shape_sprite: return self.update_shape_sprite(entity) left = (entity.x - entity.half_width) right = (entity.x + entity.half_width) # because arcade 0 on y is the bottom of the screen not the top bottom = abs((entity.y + entity.half_height) - SCREEN_HEIGHT) # bottom = entity.y - entity.half_height - SCREEN_HEIGHT top = abs((entity.y - entity.half_height) - SCREEN_HEIGHT) # top = entity.y + entity.half_height - SCREEN_HEIGHT arcade.draw_lrtb_rectangle_filled( left = left, right = right, bottom = bottom, top = top, color = COLOUR_MAP[entity.base_colour], )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_block(position, color):\n x = position.col*DX+DX+2\n y = position.row*DY+DY+2\n width = DX-4\n height = DY-4\n pygame.draw.rect(screen, color, (x,y,width,height), 0)", "def draw_block_element(self, cr, x, y):\n cr.rectangle(\n self.wall_width+x*self.block_size, \n (self.block_height-y-1)*self.block_size, \n self.block_size, self.block_size\n )\n \n cr.set_source_rgb(0.2, 0.25, 0.5)\n cr.fill_preserve()\n\n cr.set_source_rgb(0.8,0.8,0.8)\n cr.set_line_width(self.block_size/10)\n cr.stroke()", "def draw_block(cls, coord):\n block, _ = MapModel.get_block(coord)\n\n block_size = Resolution.get_primary().block_height\n block_surface = BlockTexture.load_texture(block.type, block.variant)\n cls.view.surface.blit(\n block_surface,\n coord.relative(\n depth=cls.depth, section=cls.section, scale=block_size\n ),\n )\n # Render decorations.\n # TODO: This will NOT work with offsets into unrendered blocks!\n decor, offset = block.decoration\n if decor:\n decoration = BlockTexture.load_texture(decor)\n cls.view.surface.blit(\n decoration,\n coord.get_adjacent(offset).relative(\n depth=Depth.of(coord.row),\n section=Section.of(coord.col),\n scale=block_size,\n ),\n )", "def draw_foreground(self):\n index = 0\n for tile in self.foreground_data:\n if tile != self.empty_tile:\n x_pos = (index * self.tile_size) % self.w\n y_pos = math.floor((index * self.tile_size) / self.w) * self.tile_size\n b = Block(tile, x_pos, y_pos)\n self.screen.entity_layer_1.add(b)\n index += 1", "def _draw_blocks(self):\n\t\tsurface = pygame.display.get_surface()\n\t\tcolors = {\"J\": (15, 105, 245), \"I\": (85, 235, 255), \n\t\t\t\t \"L\":(255, 170, 0), \"S\": (45, 255, 55), \"Z\": (255, 4, 0),\n\t\t\t\t \"O\": (238, 255, 0), \"T\": (245, 0, 255)}\n\t\ty = math.floor((self.window_height - (self.window_height*0.9))/2)\n\t\tx = math.floor((self.window_width - ((self.window_height*0.9)/20)*10)/2)\n\t\tincrement = math.floor((self.window_height*0.9)/20)\n\t\t# loops through board and draws to the correct spot\n\t\tfor i in range(4, len(self.gameboard.get_board())):\n\t\t\tfor j in range(len(self.gameboard.get_board()[i])):\n\t\t\t\tx_incremented = math.floor(x + (increment * j))\n\t\t\t\ty_incremented = math.floor(y + (increment * (i-4)))\n\t\t\t\tif self.gameboard.get_board()[i][j][0] in colors:\n\t\t\t\t\tpygame.draw.rect(surface, colors[self.gameboard.get_board()[i][j][0]],\n\t\t\t\t\t\t\t\t\t(x_incremented, y_incremented, increment, increment))\n\t\t\t\t\t\t\t\t\t# x, y, x_wid, y_len\n\t\t\t\telse:\n\t\t\t\t\tpygame.draw.rect(surface, (0,0,0),\n\t\t\t\t\t\t\t\t\t(x_incremented, y_incremented, increment, increment))", "def draw(self, screen, size_block):\n for co in self.get_all_coordinates():\n pos = self.board.coordinate_to_position(co)\n screen.blit(pygame.transform.scale(self.image, (size_block, size_block)), pos)", "def _draw_block(self, block: Tuple[int, int], kind: str) -> None:\n # ToDo: implement display picture: https://pythonprogramming.net/displaying-images-pygame/\n if self.board_formatting[kind]['picture'] is not None:\n raise Exception('Displaying pictures has not yet been implemented!')\n else:\n rectangle = [block[1] * self.block_length, block[0] * self.block_length,\n self.block_length, self.block_length]\n pygame.draw.rect(self.display, self.board_formatting[kind]['color'], rectangle)", "def draw(self):\n self.screen.blit(self.background, (0, 0))\n self.screen.blit(self.border, (0, 0))\n self.screen.blit(self.border, (LEVEL_WIDTH - PLAYFIELD_PADDING[0], 0))\n for y in xrange(0, BLOCK_NUM_HEIGHT):\n 
for x in xrange(0, BLOCK_NUM_WIDTH):\n if self.blocks[y][x] == '0':\n pass\n else:\n self.screen.blit(self.block_types[self.blocks[y][x]],\n (PLAYFIELD_PADDING[0] + x * Block.WIDTH,\n PLAYFIELD_PADDING[1] + y * Block.HEIGHT))\n self.screen.blit(self.editor_cursor_block,\n self.position_grid_to_screen(self.editor_cursor_position))\n self.screen.blit(self.label_help_top, self.editor_help_top_padding)\n self.screen.blit(self.label_current_block_type, self.editor_info_padding)\n self.screen.blit(self.block_types[self.available_block_types[self.current_block_type]],\n (self.editor_info_padding[0] + 100, self.editor_info_padding[1]))\n # print str(self.editor_cursor_position) + \" \" +\n # str(self.position_grid_to_screen(self.editor_cursor_position))", "def __init__(self, color, x, y):\n \n # Call the parent class (Sprite) constructor\n super().__init__()\n \n # Create the image of the block of appropriate size\n # The width and height are sent as a list for the first parameter.\n self.image = pygame.Surface([block_width, block_height])\n \n # Fill the image with the appropriate color\n self.image.fill(color)\n \n # Fetch the rectangle object that has the dimensions of the image\n self.rect = self.image.get_rect()\n \n # Move the top left of the rectangle to x,y.\n # This is where our block will appear..\n self.rect.x = x\n self.rect.y = y", "def draw(self):\n\t\tself.screen.fill(pygame.Color('black'))\n\t\tfor column in self.model.blocks:\n\t\t\tfor block in column:\n\t\t\t\tr = pygame.Rect(block.left,\n\t\t\t\t\t\t\t\tblock.top,\n\t\t\t\t\t\t\t\tblock.size,\n\t\t\t\t\t\t\t\tblock.size)\n\t\t\t\tpygame.draw.rect(self.screen, block.color,r)\n\t\tpygame.display.update()", "def __init__(self, color, x, y):\r\n\r\n # Call the parent class (Sprite) constructor\r\n super().__init__()\r\n\r\n # Create the image of the block of appropriate size\r\n # The width and height are sent as a list for the first parameter.\r\n self.image = pygame.Surface([block_width, block_height])\r\n\r\n # Fill the image with the appropriate color\r\n self.image.fill(color)\r\n\r\n # Fetch the rectangle object that has the dimensions of the image\r\n self.rect = self.image.get_rect()\r\n\r\n # Move the top left of the rectangle to x,y.\r\n # This is where our block will appear..\r\n self.rect.x = x\r\n self.rect.y = y", "def draw(self, screen, size_block):\n pos = self.board.coordinate_to_position(self.coordinate)\n screen.blit(pygame.transform.scale(self.image, (size_block, size_block)), (pos[0], pos[1]))", "def draw(self, frame):\n self.block_bot.draw(frame, self.structure_offset, self.invert_y)\n self.block_mid.draw(frame, self.structure_offset, self.invert_y)\n self.block_top.draw(frame, self.structure_offset, self.invert_y)\n\n # Draw bars\n self.bars_bot.draw(frame, self.structure_offset, self.invert_y)\n self.bars_top.draw(frame, self.structure_offset, self.invert_y)\n\n # Draw spring\n self.spring_bot.draw(frame, self.structure_offset, self.invert_y)\n self.spring_top.draw(frame, self.structure_offset, self.invert_y)\n\n # Draw point C\n self.draw_C(frame)", "def draw_block(self):\n draw_component = DrawComponent(self.component_spot,self.component_type)\n return draw_component", "def draw(self):\n #for (x, y) in self.coords:\n # pyxel.rect(\n # (x + self.x) * 4,\n # (y + self.y) * 4,\n # (x + self.x) * 4 + 3,\n # (y + self.y) * 4 + 3,\n # self.color)", "def draw_me(self):\r\n\t\tself.image.fill((100, 200, 100))\r\n\t\tif self.active: pg.draw.rect(self.image, (100, 100, 200), self.frame, 3) #if active => draw frame 
around selected entity width 3\r\n\t\tself.display_surface.blit(self.image, self.rect)", "def __init__(self, game, left, right, bottom, top, col=\"black\"):\n\n # Assign given attributes (ensuring order of coordinates)\n self.game = game\n self.canvas = game.canvas # canvas to draw self on\n self._left = min(left, right)\n self._right = max(left, right)\n self._bottom = min(bottom, top)\n self._top = max(bottom, top)\n self.color = col\n\n # Draw the block\n self._draw()", "def draw(self, parent, cr):\n for x, y in self.get_block_coords():\n parent.draw_block_element(cr, x, y)", "def draw_pixel_to_display(self):\n register = self.return_middle_registers(self.opcode)\n x = self.registers[register[0]]\n y = self.registers[register[1]]\n height = self.opcode & 0xF\n\n self.registers[0xF] = 0\n\n x = bit_utils.wrap_around(x, self.display.width)\n y = bit_utils.wrap_around(y, self.display.height)\n\n for yline in range(0, height):\n pixels = self.memory[self.I + yline]\n y1 = bit_utils.wrap_around(y + yline, self.display.height)\n for xline in range(0, 8):\n x1 = bit_utils.wrap_around(x + xline, self.display.width)\n if pixels & (0x80 >> xline) != 0:\n if self.display.set_pixel(x1, y1):\n self.registers[0xF] = 1\n\n self.display.draw_flag = True\n logger.info(\"Drawing sprite from {} to {} at {}, {}\".format(\n hex(self.I),\n hex(self.I + height),\n x, y))", "def blit(self, x, y):\n xcoord = [int(math.ceil(x)), int(math.floor(x))]\n ycoord = [int(math.ceil(y)), int(math.floor(y)), int(math.ceil(y))+1]\n for i in xcoord:\n for j in ycoord:\n if (in_range(i,j)):\n self.blocks[i][j].blit()", "def create_block():\n global BLOCK\n posx = SEG_SIZE * random.randint(1, (WIDTH-SEG_SIZE) / SEG_SIZE)\n posy = SEG_SIZE * random.randint(1, (HEIGHT-SEG_SIZE) / SEG_SIZE)\n BLOCK = c.create_oval(posx, posy,\n posx+SEG_SIZE, posy+SEG_SIZE,\n fill=\"red\")\n # print(posx, posy)\n return posx, posy", "def render(self, context):\n pygame.draw.rect(context, (255, 0, 0), self.box)", "def draw_a50(self):\r\n\t\tpg.draw.rect(self.image, (100, 200, 100), self.rect)\r\n\t\r\n\t\t#self.display_surface.blit(self.image, self.rect)\r", "def blitme(self):\n self.screen.blit(self.image, self.rect)\n # print('y = ' + str(self.rect.centery))\n # print('x = ' + str(self.rect.centerx))", "def _calculate_content(self, points):\n xs = [p.x for p in points]\n ys = [p.y for p in points]\n min_x = min(xs)\n max_x = max(xs)\n min_y = min(ys)\n max_y = max(ys)\n self._dx = max_x - min_x\n self._dy = max_y - min_y\n return pygame.Rect(min_x, min_y, self._dx, self._dy)", "def draw():", "def top_blit(self, x, y):\n xcoord = [int(math.ceil(x)), int(math.floor(x))]\n ycoord = [int(math.ceil(y)), int(math.floor(y)), int(math.ceil(y))+1]\n for i in xcoord:\n for j in ycoord:\n if (in_range(i,j)):\n if (self.blocks[i][j].image == Tree1):\n gameDisplay.blit(Tree1Part, self.blocks[i][j].realcoordinates)\n elif (self.blocks[i][j].image == Tree2):\n gameDisplay.blit(Tree2Part, self.blocks[i][j].realcoordinates)", "def draw_grid(self):\n for x in range(0, WIDTH, TILESIZE):\n pg.draw.line(self.screen, LIGHTGREY, (x, 0), (x, HEIGHT))\n \n for y in range(0, HEIGHT, TILESIZE):\n pg.draw.line(self.screen, LIGHTGREY, (0, y), (WIDTH, y))", "def _get_sprites_block(self, block, region):\n level_data = Group()\n for idx_row, row in enumerate(block):\n for idx_elem, elem in enumerate(row):\n if elem == '-':\n x = idx_elem * BLOCK_WIDTH + SCREEN_RESOLUTION * region\n y = idx_row * BLOCK_HEIGHT\n block = Block(PATH_TO_IMAGE_GRASS, (x, y))\n 
level_data.add(block)\n elif elem == '+':\n x = idx_elem * WORM_WIDTH + SCREEN_RESOLUTION * region\n y = idx_row * WORM_HEIGHT\n worm = Mob(PATH_TO_IMAGE_WORM, ANIMATIONS_WORMS, (x, y), self.play_sounds)\n level_data.add(worm)\n self.sprite_level_blocks.append(level_data)", "def boundingBox(self):\n pmodel = (glm.vec3(1, -self.y_sign, 0)\n * self.model.pos * self.transform.scale)\n x, y, _ = self.transform.pos + pmodel\n y += -self.y_sign * self.font.table['ascent'] * self.transform.scale[1]\n return x, y, self.pixwidth(), self.pixheight()", "def pixel_space(self):\n self.drawer.settransform()\n self.coordspace_bbox = [0, 0, self.width, self.height]\n self.coordspace_transform = (1, 0, 0,\n 0, 1, 0)", "def draw_frame(self):\n self.render_surface.fill((135, 206, 235))\n # self.render_surface.fill((33, 38, 63))\n self.render_surface.blit(\n self.moon,\n (self.RENDER_SURFACE_WIDTH - 150, 80),\n special_flags=pygame.BLEND_ADD,\n )\n\n # draw background\n self.draw_background()\n\n self.render_surface.blit(\n self.assets.get_character_image(self.player),\n self.camera.translate(self.player.rect),\n )\n\n for enemy in self.enemies:\n pygame.draw.rect(\n self.render_surface, enemy.color, self.camera.translate(enemy.rect)\n )\n self.draw_enemy_health(enemy)\n\n # code to mask perticular block type.\n # for i in self.chunked_map.get_blocks():\n # if i.block_type == 4:\n # pygame.draw.rect(\n # self.render_surface, (255, 255, 255), self.camera.translate(i.rect)\n # )\n\n # draw tiles\n tiles = filter(\n lambda tile: not isinstance(tile, Reward) or tile.is_valid,\n self.chunked_map.get_blocks(),\n )\n tiles = map(self.get_tile_blit_seq, tiles)\n self.render_surface.blits(tiles)\n\n # draw particles\n for particle in self.particle_system.get_active_particles():\n pygame.draw.circle(\n self.render_surface,\n particle.color,\n self.camera.translate_xy(particle.center),\n particle.radius,\n )\n\n # self.draw_fps()\n # self.draw_score()\n self.draw_player_health()\n if self.player.attack_arc_end_deg != 300:\n self.draw_attack_arc(self.player)\n\n for enemy in filter(lambda e: e.attack_arc_end_deg != 300, self.enemies):\n self.draw_attack_arc(enemy)\n\n if not self.player.read_to_take_damage:\n red_s = pygame.Surface(\n (self.RENDER_SURFACE_WIDTH, self.RENDER_SURFACE_HEIGHT)\n )\n red_s.fill((100, 0, 0))\n self.render_surface.blit(red_s, (0, 0), special_flags=pygame.BLEND_ADD)", "def draw(self, screen):", "def draw(self):\n if self.state == 'alive':\n for i in range(len(self.tail)):\n pygame.draw.rect(display, black, (squareToXPix(self.tail[-(i + 1)][0], objectSize), squareToYPix(self.tail[-(i + 1)][1], objectSize), objectSize, objectSize))\n\n pygame.draw.rect(display, black, (squareToXPix(self.x, objectSize), squareToYPix(self.y, objectSize), objectSize, objectSize))\n\n else:\n for i in range(len(self.tail)):\n pygame.draw.rect(display, red, (squareToXPix(self.tail[-(i + 1)][0], objectSize), squareToYPix(self.tail[-(i + 1)][1], objectSize), objectSize, objectSize))\n\n pygame.draw.rect(display, red, (squareToXPix(self.x, objectSize), squareToYPix(self.y, objectSize), objectSize, objectSize))", "def draw(self, surface):\n\n surface.fill(BLACK)\n\n for line in range(len(self.structure)):\n for sprite in range(len(self.structure[line])):\n x = sprite * SPRITE_SIZE\n y = line * SPRITE_SIZE\n if self.structure[line][sprite] == 'b': \n if line == 0 and sprite == 0: #top left corner\n self.draw_top_left_corner(surface, BLUE, x, y, SPRITE_SIZE)\n elif line > 0 and sprite > 0 and self.structure[line - 1][sprite] 
== 'n' and self.structure[line][sprite - 1] == 'n': # top left corner\n self.draw_top_left_corner(surface, BLUE, x, y, SPRITE_SIZE)\n elif line == 0 and sprite == len(self.structure[line]) - 1 : # top right corner\n self.draw_top_right_corner(surface, BLUE, x, y, SPRITE_SIZE)\n elif line > 0 and sprite < len(self.structure[line]) - 1 and self.structure[line - 1][sprite] == 'n' and self.structure[line][sprite + 1] == 'n': # top right corner\n self.draw_top_right_corner(surface, BLUE, x, y, SPRITE_SIZE)\n elif line == len(self.structure) - 1 and sprite == len(self.structure[line]) - 1 : # bottom right corner\n self.draw_bottom_right_corner(surface, BLUE, x, y, SPRITE_SIZE)\n elif line < len(self.structure) - 1 and sprite < len(self.structure[line]) - 1 and self.structure[line + 1][sprite] == 'n' and self.structure[line][sprite + 1] == 'n': # bottom right corner\n self.draw_bottom_right_corner(surface, BLUE, x, y, SPRITE_SIZE)\n elif line == len(self.structure) - 1 and sprite == 0 : # bottom left corner\n self.draw_bottom_left_corner(surface, BLUE, x, y, SPRITE_SIZE)\n elif line < len(self.structure) - 1 and sprite > 0 and self.structure[line + 1][sprite] == 'n' and self.structure[line][sprite - 1] == 'n': # bottom left corner\n self.draw_bottom_left_corner(surface, BLUE, x, y, SPRITE_SIZE)\n else:\n rect(surface, BLUE, (x, y, SPRITE_SIZE, SPRITE_SIZE))\n elif self.structure[line][sprite] == 'n': \n rect(surface, BLACK, (x, y, SPRITE_SIZE, SPRITE_SIZE))\n elif self.structure[line][sprite] == 'o': \n rect(surface, BLACK, (x, y, SPRITE_SIZE, SPRITE_SIZE))\n elif self.structure[line][sprite] == 'v':\n rect(surface, GREEN, (x, y, SPRITE_SIZE, SPRITE_SIZE))\n \n for pacgum in self.pacgums:\n x = pacgum[0] * SPRITE_SIZE + SPRITE_SIZE // 2\n y = pacgum[1] * SPRITE_SIZE + SPRITE_SIZE // 2\n circle(surface, YELLOW, (x, y), SPRITE_SIZE // 5)", "def _draw_mystery_block(self, instance: MysteryBlock, shape: pymunk.Shape,\n view: tk.Canvas, offset: Tuple[int, int]) -> List[int]:\n if instance.is_active(): # if MysteryBlock is active\n image = self.load_image(\"coin\")\n else:\n image = self.load_image(\"coin_used\")\n\n return [view.create_image(shape.bb.center().x + offset[0], shape.bb.center().y,\n image=image, tags=\"block\")]", "def widthAndHeight(block):\n block.left = min(list(zip(*block.coords))[0])\n block.right = max(list(zip(*block.coords))[0])\n block.top = min(list(zip(*block.coords))[1])\n block.bottom = max(list(zip(*block.coords))[1])\n block.width = (\n block.right - block.left +\n 1\n ) * 4\n block.height = (\n block.bottom - block.top +\n 1\n ) * 4", "def draw(self):", "def pygDraw(self):\n x1,y1 = float(self.x), float(self.y) # bottom left\n x2,y2 = float(self.x+self.width), float(self.y) # bottom right\n x3,y3 = float(self.x+self.width), float(self.y+self.height) # Top right \n x4,y4 = float(self.x), float(self.y+self.height) # Top left\n \n glBegin(GL_QUADS)\n glVertex3f(x4, y4, 0.0)\t# Top left\n glVertex3f(x3, y3, 0.0)\t# Top right\n glVertex3f(x2, y2, 0.0)\t# bottom right\n glVertex3f(x1, y1, 0.0)\t# bottom left\n glEnd()", "def display(self):\n print('\\n' * (self.__y), end='')\n for point in range(self.__height):\n print(' ' * self.__x, end='')\n for point in range(self.__width - 1):\n # print(' ' * self.__x, end='')\n print('#', end='')\n print('#')", "def __init__(self):\n self.t_height = 291\n self.t_left = 65\n self.t_right = 144\n self.points = Pix()", "def __init__(self, color, width, height):\n # Call the parent class (Sprite) constructor\n super().__init__()\n \n # Create 
an image of the block, and fill it with a color.\n # This could also be an image loaded from the disk.\n self.image = pygame.Surface([width, height])\n self.image.fill(color)\n \n # Fetch the rectangle object that has the dimensions of the image\n # image.\n # Update the position of this object by setting the values\n # of rect.x and rect.y\n self.rect = self.image.get_rect()", "def __init__(self, y):\n pygame.sprite.Sprite.__init__(self)\n \n self.image = pygame.Surface((1081, 17))\n self.image = self.image.convert()\n self.image.fill((255, 255, 255))\n self.rect = self.image.get_rect()\n self.rect.top = y\n self.rect.left = -1", "def _render_vertical(self, gc, lx, ly, rx, ry, mx, my):\n mx = lx + (rx - lx) / 2.\n with gc:\n gc.set_line_width(20)\n gc.set_stroke_color(self._get_border_color())\n tee_v(gc, lx, ly, rx, mx, my)\n\n gc.set_line_width(10)\n self.set_fill_color(gc)\n tee_v(gc, lx, ly, rx, mx, my)", "def __draw(self, screen):\n\n pygame.draw.rect(screen, (200, 255, 200), (self.x, self.y, self.width, self.height))", "def rect(framebuf, x, y, width, height, color):\n # pylint: disable=too-many-arguments\n for _x in range(x, x + width):\n for _y in range(y, y + height):\n if _x in [x, x + width] or _y in [y, y + height]:\n GS2HMSBFormat.set_pixel(framebuf, _x, _y, color)", "def update(self):\r\n \r\n # Desplaza el bloque un píxel hacia abajo.\r\n if self.rect.y > 500:\r\n self.rect.x = random.randrange(10,600) \r\n self.rect.y = random.randrange(-300, -20) \r\n \r\n else:\r\n self.rect.y += 5\r\n \r\n # Si el bloque estuviera muy abajo, lo restablecemos a la parte superior de la pantalla.\r", "def create_hard_block_at(self, x, y):\n cell_size = self.map.get_cell_size()\n obj = HardBlock(\n parent=self.map,\n style={\n 'width': cell_size, \n 'height': cell_size * 2, \n 'z-index': layers['object'] }\n )\n # I am a hard block, I can stop the fire without being destroyed\n fireblocking(block(obj))\n\n self.map.add_node(obj, x, y, 0, -cell_size)\n return obj", "def _set_y_block_size(self):\n self._scene_gen.block_dimensions = (self._scene_gen.block_dimensions[X],\n self._block_size_y_spinbox.value(),\n self._scene_gen.block_dimensions[Z])\n self._refresh_view()", "def _create_drawing_area(self):\n\n self.drawing_x = -self.size/2 + self.margin\n self.drawing_y = self.size/2 - self.margin\n self.drawing_width = self.size - self.margin * 2\n self.drawing_height = (self.size/2 + self.flat_fragment) - self.margin * 2\n \n self.drawing_x_step = self.drawing_width \n self.drawing_y_step = self.drawing_height", "def draw(self):\n view_plane = self.view_space.view_plane\n if view_plane == 'XY':\n self._drawXYentities()\n elif view_plane == 'YZ':\n self._drawYZentities()\n else:\n self._drawXZentities()\n return self", "def render(self):\n for r in range(self.y_size):\n line = ''\n for c in range(self.x_size):\n glyph = self.MAP_GLYPH_TABLE[self.grid_data[r][c]]\n\n # overwrite with player\n if r == self.player_y and c == self.player_x:\n glyph = self.PLAYER_GLYPH_TABLE[self.player_heading]\n\n line += glyph\n print(line)\n\n print('\\n' * (20 - self.y_size))", "def draw_on(self, surface):\n for x, y in self.alive_cells():\n #size = (self.box_size, self.box_size)\n #position = (x * self.box_size, y * self.box_size)\n #thickness = 1\n pygame.draw.rect(surface, DARK_RED, (x * self.box_size, y * self.box_size,self.box_size, self.box_size ))", "def draw(self, screen):\n for branch_points in self.branches:\n pygame.draw.polygon(screen, self.branch_color, branch_points)\n for bottom_points in 
self.bottom:\n pygame.draw.polygon(screen, self.bottom_color, bottom_points)", "def render(self, screen) -> None:\n for y in range(self.width):\n for x in range(self.height):\n if self.get_value(Point(x, y)) == FieldState.SNAKE:\n draw.rect(screen, WHITE, (x*SIZE, y*SIZE, SIZE, SIZE))", "def Haut():\r\n X1, Y1, X2, Y2 = canvas.coords(boule)\r\n canvas.coords(boule,X1,Y1-20,X2,Y2-20)", "def draw_brick(self, x, y):\n pygame.draw.rect(self.main_surface, self.color, (x, y, self.width, self.height), 0)\n pygame.display.update()", "def __init__(self, color, width, height):\n \n # Call the parent class (Sprite) constructor\n super().__init__()\n \n # Create an image of the block, and fill it with a color.\n # This could also be an image loaded from the disk.\n self.image = pygame.Surface([width, height])\n self.image.fill(color)\n \n # Fetch the rectangle object that has the dimensions of the image\n # image.\n # Update the position of this object by setting the values\n # of rect.x and rect.y\n self.rect = self.image.get_rect()", "def draw(self, parent, cr):\n for y, row in enumerate(self.matrix):\n for x, cell in enumerate(row):\n if cell:\n parent.draw_block_element(cr, x, y)", "def display(self):\n for i in range(self.height - 1, 0, -1):\n for j in range(self.width):\n # yield i, j - 1, self.grid[i][j - 1]\n yield j, i, self.dungeon.tile(Point(j, i))\n\n \"\"\"\n def __iter__(self):\n for i in range(self.height):\n for j in range(self.width):\n yield Point(x=self.x + j, y=self.y + i)\n \"\"\"", "def draw(self):\n x = self.displacement.x + self.physics_canvas.origin_x\n y = self.displacement.y + self.physics_canvas.origin_y\n self.canvas_id = self.physics_canvas.canvas.create_rectangle(x-10,y+10,x+10,y-10, fill='black') # e.g.", "def NewBlock(self):\n for i in self.matrix:\n if 2 in i:\n return()\n blockType = self.bag.Choose()\n subtractor = {\"I\" : 4, \"J\" : 3, \"L\" : 3, \"O\" : 2, \"S\" : 3, \"T\" : 3, \"Z\": 3}\n x = random.randint(0, self.width - subtractor.get(blockType))\n coords = []\n if blockType == \"I\":\n coords = [(x + i, 0) for i in range(4)]\n elif blockType == \"J\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x, 1))\n elif blockType == \"L\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x + 2, 1))\n elif blockType == \"O\":\n coords = [(x, 0), (x + 1, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"Z\":\n coords = [(x, 0), (x + 1, 0), (x + 1, 1), (x + 2, 1)]\n elif blockType == \"S\":\n coords = [(x + 1, 0), (x + 2, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"T\":\n coords = [(x, 0), (x + 1, 0), (x + 2, 0), (x + 1, 1)]\n self.coords = coords\n return(coords)", "def init_blocks(self):\n length = self.physics.len_blocks\n rect = Rectangle(Vector(self.rpos.x, self.rpos.y),\n Vector(self.rpos.x + length, self.rpos.y + length))\n self.rects.append(rect)\n self.physics.add_block(rect, self.stype)", "def draw_grid(self, darken=1):\n if not(0 < darken < 1):\n darken = 1\n for x in range(0, int(self.settings['grid_size'])):\n for y in range(0, int(self.settings['grid_size'])):\n if self.grid[x][y] == g.EMPTY:\n if (x + y) % 2 == 0:\n r = pygame.Rect((x * self.block_width, y * self.block_height),\n (self.block_width, self.block_width))\n pygame.draw.rect(self.surface, (93 * darken, 216 * darken, 228 * darken), r)\n else:\n rr = pygame.Rect((x * self.block_width, y * self.block_width),\n (self.block_width, self.block_width))\n pygame.draw.rect(self.surface, (84 * darken, 194 * darken, 205 * darken), rr)\n elif self.grid[x][y] == g.WALL:\n rr = 
pygame.Rect((x * self.block_width, y * self.block_width), (self.block_width, self.block_width))\n pygame.draw.rect(self.surface, (175 * darken, 34 * darken, 6 * darken), rr)\n elif self.grid[x][y] == g.PLAYER:\n r = pygame.Rect((x * self.block_width, y * self.block_height),\n (self.block_width, self.block_height))\n pygame.draw.rect(self.surface, (17 * darken, 24 * darken, 47 * darken), r)\n pygame.draw.rect(self.surface, (93, 216, 228), r, 1)\n elif self.grid[x][y] == g.FOOD:\n r = pygame.Rect((x * self.block_width, y * self.block_height),\n (self.block_width, self.block_height))\n pygame.draw.rect(self.surface, (223 * darken, 163 * darken, 49 * darken), r)\n pygame.draw.rect(self.surface, (93, 216, 228), r, 1)", "def Draw(self):\n\t\tGameImage.Draw(self, self.coords)", "def grid_init(self):\n # draw.line(surface, color, start_pos, end_pos, width/thickness)\n pygame.draw.line(\n self.game_screen,\n GameData.line_color,\n (0, GameData.square_size),\n (GameData.screen_dim, GameData.square_size),\n GameData.line_width\n )\n # # 2 horizontal\n pygame.draw.line(\n self.game_screen,\n GameData.line_color,\n (0, 2 * GameData.square_size),\n (GameData.screen_dim,2 * GameData.square_size),\n GameData.line_width\n )\n\n # # 1 vertical\n pygame.draw.line(\n self.game_screen,\n GameData.line_color,\n (GameData.square_size, 0),\n (GameData.square_size, GameData.screen_dim),\n GameData.line_width\n )\n # # 2 vertical\n pygame.draw.line(\n self.game_screen,\n GameData.line_color,\n (2 * GameData.square_size, 0),\n (2 * GameData.square_size, GameData.screen_dim),\n GameData.line_width)", "def map_blit(self):\n for l in self.blocks:\n for b in range(len(l)):\n l[b].blit()", "def drawRect (self, x, y, w, h, colour):\r\n for i in range (y,y+h):\r\n row = self.image [i]\r\n\r\n for j in range (x,x+w):\r\n row [j] = colour", "def display(self):\n width = self.width\n height = self.height\n x = self.x\n y = self.y\n for d_y in range(y):\n print()\n for h in range(height):\n if x != 0:\n print(\" \" * x, end=\"\")\n print(\"#\" * width)", "def draw(self, frame):\n xpos = OFS + self.x * TILE_SIZE\n ypos = OFS + self.y * TILE_SIZE\n frame[ypos:ypos+TILE_SIZE, xpos:xpos+TILE_SIZE] = self.image", "def create_block(self, x, y, block_type):\n sprite_stack = self.get_sprite(x, y)\n if sprite_stack:\n sprite = sprite_stack[-1]\n sprite.image = block_type\n return\n\n # no existing block, so create a new one\n block_x = x * self.block_x + self.offset_x + self.menu_x\n block_y = y * self.block_y + self.offset_y\n\n bar = Sprite(\"\", image_data=block_type, x=block_x, y=block_y)\n if (x, y) in self.sprites:\n self.sprites[(x, y)].append(bar)\n else:\n self.sprites[(x, y)] = [bar]", "def drawMap(self):\n for position, contain in self.map.items():\n if contain is \"block\":\n self.blocks.add(Block(position[1]*50,position[0]*50))\n elif contain is \"Coins\":\n self.Coins.add(Coins(position[1]*50+10,position[0]*50+10))", "def __init__(self):\n\n # Call the parent class (sprite) constructor\n super().__init__()\n # Create image of block and fill with color.\n self.image = pygame.Surface([20, 20])\n self.image.fill(BLACK)\n\n # Fetch rectangle object that has dimensions of image. 
Update position of object by setting values\n # of rect.x and rect.y\n self.rect = self.image.get_rect()", "def Bas():\r\n X1, Y1, X2, Y2 = canvas.coords(boule)\r\n canvas.coords(boule,X1,Y1+20,X2,Y2+20)", "def draw(self):\n\n super().draw()\n \n self.dim = self.getdim()\n start_x, start_y, = self.x(), self.y()\n\n for y in range(self.r):\n for x in range(self.c):\n x_pos, y_pos = start_x + (self.dim * x), start_y + (self.dim * y)\n self.tiles[y][x].resize(x_pos, y_pos, self.dim, self.dim)", "def display(self):\n print(\"\\n\" * self.__y, end=\"\")\n for i in range(self.__height):\n print(\" \" * self.__x, end=\"\")\n print(\"#\" * self.__width, end=\"\")\n print()", "def draw_bounding_boxes(display, bounding_boxes):\n\n bb_surface = pygame.Surface((VIEW_WIDTH, VIEW_HEIGHT))\n bb_surface.set_colorkey((0, 0, 0))\n for bbox in bounding_boxes:\n points = [(int(bbox[i, 0]), int(bbox[i, 1])) for i in range(8)]\n # draw lines\n # base\n pygame.draw.line(bb_surface, BB_COLOR, points[0], points[1])\n pygame.draw.line(bb_surface, BB_COLOR, points[1], points[2])\n pygame.draw.line(bb_surface, BB_COLOR, points[2], points[3])\n pygame.draw.line(bb_surface, BB_COLOR, points[3], points[0])\n # top\n pygame.draw.line(bb_surface, BB_COLOR, points[4], points[5])\n pygame.draw.line(bb_surface, BB_COLOR, points[5], points[6])\n pygame.draw.line(bb_surface, BB_COLOR, points[6], points[7])\n pygame.draw.line(bb_surface, BB_COLOR, points[7], points[4])\n # base-top\n pygame.draw.line(bb_surface, BB_COLOR, points[0], points[4])\n pygame.draw.line(bb_surface, BB_COLOR, points[1], points[5])\n pygame.draw.line(bb_surface, BB_COLOR, points[2], points[6])\n pygame.draw.line(bb_surface, BB_COLOR, points[3], points[7])\n display.blit(bb_surface, (0, 0))", "def draw_grid():\r\n screen.fill((0,0,0))\r\n pygame.draw.line(screen, (255,255,255),(WIDTH/3,0),(WIDTH/3,HEIGHT))\r\n pygame.draw.line(screen, (255,255,255),(2*WIDTH/3,0),(2*WIDTH/3,HEIGHT))\r\n pygame.draw.line(screen, (255,255,255),(0,HEIGHT/3),(WIDTH,HEIGHT/3))\r\n pygame.draw.line(screen, (255,255,255),(0,2*HEIGHT/3),(WIDTH,2*HEIGHT/3))", "def drawOrigin():\n if xMin < 0 < xMax:\n if yMin < 0 < yMax:\n x, y = cartesianToScreen(0, 0)\n\n pygame.draw.line(display, WHITE, (x - 6, y),\n (x + 6, y), 3)\n\n pygame.draw.line(display, WHITE, (x, y - 6),\n (x, y + 6), 3)", "def __draw(self, display, color, size):\n\t\tif self.walls[0]: # up\n\t\t\tpygame.draw.line(display, color, (self.col * size , self.row * size) , (self.col * size + size, self.row * size))\n\t\tif self.walls[3]: # down\n\t\t\tpygame.draw.line(display, color, (self.col * size + size, self.row * size + size), (self.col * size , self.row * size + size))\n\t\tif self.walls[1]: #left\n\t\t\tpygame.draw.line(display, color, (self.col * size + size, self.row * size) , (self.col * size + size, self.row * size + size))\n\t\tif self.walls[2]: #right\n\t\t\tpygame.draw.line(display, color, (self.col * size , self.row * size + size), (self.col * size , self.row * size))\n\n\t\tif self.current:\n\t\t\tdraw_rect_with_alpha(display, self.CURRENT_COLOR, Vector((self.col, self.row)) * size, (size, size))\n\n\t\telif self.backtracked and self.SHOW_BACKTRACK:\n\t\t\tdraw_rect_with_alpha(display, self.BACKTRACKED_COLOR, Vector((self.col, self.row)) * size, (size, size))\n\n\t\telif self.visited:\n\t\t\tdraw_rect_with_alpha(display, self.VISITED_COLOR, Vector((self.col, self.row)) * size, (size, size))", "def __init__(self, width, height):\n self.width = width\n self.height = height\n self.pixels = []\n self.r = 255\n 
self.g = 0\n self.b = 0\n self.pointSize = 30\n self.vr = 255\n self.vg = 200\n self.vb = 200\n self.glclear()", "def drawBox (self, left, top, width, height, colour):\r\n w = self.bih_vals [bih_Width]\r\n h = self.bih_vals [bih_Height]\r\n\r\n cols = [left, left + width - 1]\r\n rows = [top, top + height - 1]\r\n \r\n x0 = max ((0,left))\r\n x1 = min ((cols[1]+1, w))\r\n y0 = max ((0,top))\r\n y1 = min ((rows [1]+1, h))\r\n\r\n # rows\r\n\r\n for r in rows:\r\n if r >= 0 and r < h:\r\n row = self.image [r]\r\n for x in range (x0, x1):\r\n row [x] = colour\r\n\r\n # columns\r\n \r\n for y in range (y0, y1):\r\n row = self.image [y]\r\n for c in cols:\r\n if c >= 0 and c < w :\r\n row [c] = colour", "def __init__(self):\n self.x = 0\n self.y = 0", "def __init__(self):\n self.x = 0\n self.y = 0", "def draw(self,renderer,dx,dy):\n for i in self.itemType.find('display'):\n if i.tag == 'rect':\n colors = i.find('color').text[1:-1].split(',')\n SDL_SetRenderDrawColor(renderer,int(colors[0]),int(colors[1]),int(colors[2]),int(colors[3]) if len(colors) > 3 else 255)\n rect = SDL_Rect()\n rect.x, rect.y = self.getPos()\n rect.x, rect.y = rect.x+dx,rect.y+dy\n rect.w, rect.h = self.getSize()\n SDL_RenderFillRect(renderer,rect)", "def update(self):\n left_height = self.left.height if self.left else -1\n right_height = self.right.height if self.right else -1\n self.height = 1 + max(left_height, right_height)\n self.bf = right_height - left_height", "def __init__(self):\n pygame.init()\n pygame.display.set_caption(TITLE)\n self.screen = pygame.display.set_mode(WIN_SIZE)\n\n self.clock = pygame.time.Clock()\n self.score_pos = CENTER_W, BLOCK_H // 2\n\n self.white_bar = pygame.Surface((WIN_W, BLOCK_H))\n self.white_bar.fill((255, 255, 255))\n self.white_bar = self.white_bar.convert()\n self.block = pygame.Surface(BLOCK_SIZE)\n self.block.fill(Color.RED)\n self.red_block = self.block.convert()\n self.block.fill(Color.GREEN)\n self.green_block = self.block.convert()", "def example_BSR():\n pts = [(1,1),(2,2),(3,3)]\n lines = [ [ (1,1), (1,2), (2,1)], [ (6,1), (1,6), (5,-1)] ]\n\n bloody_simple_2drender('2d_render.png', pts=pts, vecs=pts, lines=lines )", "def render(self, game):\n pygame.draw.rect(game.screen,\n self.colour,\n (int(self.x), int(self.y), self.a, self.b))", "def display(self):\n\n print(\"\\n\" * self.__y, end='') # y offset\n\n for i in range(self.__height):\n print(\" \" * self.__x, end='') # x offset\n print(\"#\" * self.__width)", "def blockpoints(pix, coords, size):\n xs, ys = coords\n for x in range(xs,xs+size):\n for y in range(ys,ys+size):\n yield pix[x,y]", "def DrawBase(screen, base_x, base_y, base_len, base_width):\n pygame.draw.rect(screen, (255,0,0),(base_x, base_y, base_len*2, base_width*2), 4)", "def draw_world(self, world):\n\n # Clear screen\n self.window.fill(self.background_color)\n\n # Draw board\n for y in range(0, self.height):\n for x in range(0, self.width):\n c = BLACK\n\n blip_count = 0\n if world.map[y][x].blips:\n blip_count = len(world.map[y][x].blips)\n\n # Color code the blip's health %\n total_status = (0, 0, 0)\n\n for b in world.map[y][x].blips:\n status = b.get_status()\n total_status = tuple(map(sum, zip(total_status, status)))\n\n # Get average health in case of multiple blips\n hp = min(total_status)\n c = (255, 255 * hp / blip_count, 0)\n\n elif world.map[y][x].type == \"water\":\n c = WATER\n elif world.map[y][x].type == \"forest\":\n # Make sure the tile doesn't disappear completely\n fill_percent = max(world.map[y][x].value / params.FOOD_SIZE, 0.2)\n c 
= (0, 255 * fill_percent, 0)\n\n pygame.draw.rect(self.window, c, self.board[y][x], 0)\n\n # Add count for multiple blips in a tile\n if blip_count > 1:\n self.add_text(str(blip_count), BLACK, self.board[y][x].center, self.font)\n\n # Draw grid lines\n for i in range(0, self.height):\n screen_y = i * self.block_size\n pygame.draw.line(self.window, OUTLINE, (0, screen_y), (self.width * self.block_size, screen_y), 2)\n for i in range(0, self.width):\n screen_x = i * self.block_size\n pygame.draw.line(self.window, OUTLINE, (screen_x, 0), (screen_x, self.height * self.block_size), 2)\n\n # Write population count\n pos = (self.width * self.block_size / 2, 20)\n self.add_text(str(len(world.blips.keys())), WHITE, pos, self.counter_font)\n\n # Render to screen\n pygame.display.flip()", "def box(self, x0, y0, width, height):\n assert width > 1\n assert height > 1\n\n width -= 1\n height -= 1\n\n for x in range(x0, x0 + width):\n self.point(x, y0, \"-\")\n self.point(x, y0 + height, \"-\")\n\n for y in range(y0, y0 + height):\n self.point(x0, y, \"|\")\n self.point(x0 + width, y, \"|\")\n\n self.point(x0, y0, \"+\")\n self.point(x0 + width, y0, \"+\")\n self.point(x0, y0 + height, \"+\")\n self.point(x0 + width, y0 + height, \"+\")", "def display(self):\n print(\"\\n\" * self.y, end='')\n for i in range(self.height):\n for j in range(self.width + self.x):\n if j < self.x:\n print(' ', end='')\n else:\n print('#', end='')\n print('')", "def draw(self):\n if self.open:\n self.xpos += (200-self.xpos) * 0.1\n else:\n self.xpos += (-self.xpos) * 0.1\n\n # get the display size\n dispw, disph = c_int(), c_int()\n SDL_GetRendererOutputSize(self.rend,dispw,disph)\n\n # don't waste resources drawing the pallet if it isn't onscreen\n if self.xpos > 5:\n #draw the background for the tile pallet\n SDL_SetRenderDrawColor(self.rend,0,0,0,200)\n rect = SDL_Rect()\n rect.x, rect.y, rect.w, rect.h = round(self.xpos-200),0,200,disph.value\n SDL_RenderFillRect(self.rend,rect)\n\n # draw edge line \n SDL_SetRenderDrawColor(self.rend,255,255,255,255)\n rect.x, rect.y, rect.w, rect.h = round(self.xpos-1),0,1,disph.value\n SDL_RenderFillRect(self.rend,rect)\n\n # draw tile previews\n for i in range(len(self.itemList.items)+1):\n # highlight selected tile\n if i-1 == self.selected:\n rect.x, rect.y, rect.w, rect.h = round(self.xpos-185),i*150+45-self.scroll,138,138\n SDL_SetRenderDrawColor(self.rend,255,255,255,100)\n SDL_RenderFillRect(self.rend,rect)\n # draw tile preview\n rect.x, rect.y, rect.w, rect.h = round(self.xpos-180),i*150+50-self.scroll,128,128\n if i >= 1:\n for x in self.itemList.items[i-1].find('display'):\n if x.tag == 'rect':\n colors = x.find('color').text[1:-1].split(',')\n SDL_SetRenderDrawColor(self.rend,int(colors[0]),int(colors[1]),int(colors[2]),int(colors[3]) if len(colors) > 3 else 255)\n SDL_RenderFillRect(self.rend,rect)\n #SDL_RenderCopy(self.rend,self.tileSet.getTex(i),None,rect)\n SDL_SetRenderDrawColor(self.rend,255,255,255,255)\n\n # draw the file name for the tile\n quickRenderText(self.rend,self.ft_Mono16,self.itemList.items[i-1].find('name').text.strip(),rect.x,rect.y+128)\n else:\n #SDL_RenderCopy(self.rend,self.tileSet.getTex(i),None,rect)\n SDL_SetRenderDrawColor(self.rend,255,255,255,255)\n\n # draw the file name for the tile\n quickRenderText(self.rend,self.ft_Mono16,\"Edit Only\",rect.x,rect.y+128)", "def _block(self, x, y):\n\n cells = []\n\n ix = x - (x%3)\n iy = y - (y%3)\n\n for y in range(iy, iy+3):\n for x in range(ix, ix+3):\n i = self._index(x, y)\n 
cells.append(self.data[i])\n\n return cells", "def draw_block():\n turtle.down()\n turtle.begin_fill()\n turtle.pensize(3)\n turtle.forward(50)\n turtle.left(90)\n turtle.forward(50)\n turtle.left(90)\n turtle.forward(50)\n turtle.left(90)\n turtle.forward(50)\n turtle.left(90)\n turtle.end_fill()\n turtle.up()", "def blitme(self): \n positions = [(10, -10), (70, 10), (130, 40), (190, 90), (20, 110), (50, 250), (100, 150), (210, 180), \n (260, 310), (300, 370), (360, 430), (420, 500), (490, 560), (550, 610), (235, 680), (330, 730), (385, 800),\n (475, 800), (10, -20), (700, 10), (630, 450), (770, 100), (720, 400), (800, 200), (580, 300), (720, 0), \n (600, 70), (500, 250), (200, 10), (350, 40), (500, 70), (450, 600), (650, 500), (200, 500), (270, 550), \n (100, 450), (30, 530), (100, 350), (400, 200), (680, 170), (780, 200)]\n for position in positions:\n self.screen.blit(self.image, position)", "def _draw(self):\n\n # Draw a rectangle on the game's canvas\n self.sprite = self.canvas.create_rectangle(self.left, self.bottom,\n self.right, self.top, fill=self.color)" ]
[ "0.67088455", "0.66308796", "0.6437608", "0.64263946", "0.6355193", "0.6322639", "0.6320987", "0.6217817", "0.6151702", "0.6137533", "0.61204463", "0.61078066", "0.60902894", "0.60667974", "0.5956826", "0.58381814", "0.5831373", "0.57956904", "0.5794884", "0.57708657", "0.576466", "0.5733773", "0.5715093", "0.5700312", "0.5649023", "0.5638822", "0.5615942", "0.55953467", "0.5582787", "0.55506945", "0.5550553", "0.5543271", "0.55390584", "0.55387145", "0.5528753", "0.5511153", "0.55053765", "0.5502059", "0.549616", "0.54913884", "0.54788446", "0.547096", "0.5464195", "0.5458151", "0.54547364", "0.54515", "0.5447877", "0.5435171", "0.5434083", "0.54236776", "0.54193336", "0.5412236", "0.54074144", "0.54018676", "0.5401237", "0.5384751", "0.53689826", "0.53655565", "0.5363015", "0.5361198", "0.5358684", "0.53570956", "0.5340647", "0.5331176", "0.53310066", "0.5328881", "0.53231734", "0.5320745", "0.5309319", "0.5309241", "0.53057337", "0.53040874", "0.5303439", "0.52996254", "0.5297524", "0.5292425", "0.5287347", "0.5281489", "0.5273283", "0.5271782", "0.5265954", "0.5263266", "0.5260566", "0.5260566", "0.5258522", "0.52523774", "0.5249672", "0.52476376", "0.5242764", "0.5240488", "0.5234477", "0.52338517", "0.52302927", "0.5228299", "0.52213687", "0.52185875", "0.521396", "0.521196", "0.52055573", "0.52053607" ]
0.5946452
15
Draw a path for visual debugging
def draw_path(self, path):
    for path_point in path:
        arcade.draw_lrtb_rectangle_filled(
            left = path_point[0] - 3,
            right = path_point[0] + 3,
            bottom = abs(path_point[1] + 3 - SCREEN_HEIGHT),
            top = abs(path_point[1] - 3 - SCREEN_HEIGHT),
            color = COLOUR_MAP[Colour.YELLOW.value],
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_path(self):\r\n if len(self.path) > 1:\r\n for i in range(1, len(self.path)):\r\n pg.draw.line(self.screen, (0, 150, 0),\r\n self.path[i - 1], self.path[i], 1)\r\n elif len(self.path) == 1:\r\n pg.draw.circle(self.screen, (0, 150, 0),\r\n (int(self.path[0].x), int(self.path[0].y)), 1)", "def drawPath(self):\r\n bgl.glColor4f(0.8,0.8,0.9,0.01)\r\n bgl.glLineWidth(0.01)\r\n\r\n bgl.glBegin(bgl.GL_LINES)\r\n bgl.glVertex3f(self.p1[0],self.p1[1],self.p1[2])\r\n bgl.glVertex3f(self.p2[0],self.p2[1],self.p2[2])\r\n bgl.glEnd()\r\n\r\n bgl.glNormal3f(0.0,0.0,1.0)\r\n bgl.glShadeModel(bgl.GL_SMOOTH);", "def path(self):\n self.renderer.begin_rendering(\"path\")\n self.renderer.draw_polyline_3d(self.guides, self.renderer.white())\n self.renderer.end_rendering()", "def draw_path(self):\n\n # using current data row number\n # switch to appropriate row in file\n self.switch_row(self.data_row_num)\n path = self.list2FloatPairs(self.row)\n\n for pt in path:\n x = int(pt[0])\n y = int(pt[1])\n pts = x,y\n pygame.draw.circle(self.screen,self.BLACK,pts,2)", "def drawPath(self, path=[]):\n subpath = NSBezierPath.alloc().init()\n subpath.moveToPoint_(path[0][0])\n for p in path[1:]:\n if len(p) == 3:\n # curve\n A, B, C = p\n subpath.curveToPoint_controlPoint1_controlPoint2_(C, A, B)\n else:\n subpath.lineToPoint_(p[0])\n\n subpath.closePath()\n NSColor.colorWithCalibratedRed_green_blue_alpha_(\n 0, 0, 1, self.alpha\n ).set()\n subpath.stroke()", "def draw_path(self, path, color):\n\n half_width = self.cell_width/2\n half_height = self.cell_height/2\n\n # List of coordinates corresponding to the center of each\n # cell in the path, in a form acceptable to cvs.create_line.\n coord_list = []\n for cell in path:\n coord_list.append((self.cell_width*cell[1] + half_width,\n self.height - (self.cell_height*cell[0] + half_height)))\n \n self.cvs.create_line(coord_list, width=4, fill=color)\n self.draw()", "def print_path(self):\n\n grid = tg.Graph.grid_graph(self.graph.rows,self.graph.cols)\n #tg.draw_grid(self.draw_edges_alt,self.graph.rows,self.graph.cols,grid)\n tg.draw_grid(self.edges,self.graph.rows,self.graph.cols,grid)", "def display_path(self, path):\n graph = path.graph\n if not graph:\n return\n for v in sorted(graph.vertices()):\n p = graph.get_vertex_attribute(v, 'xy')\n x, y = to_geometry(p[0]), to_geometry(p[1])\n print('define v{} ellipse 2 2 c_vertex {} {}'.format(v, x, y))\n #print('define v{0}t text {0} 14 white {1} {2}'.format(v, x, y))\n for u, v in graph.edges():\n print('define - link v{} v{} 1 c_edge'.format(u, v))\n # NOTE: this code assumes paths will not move indefinitely\n print('fix /./')", "def draw_path(self, path: Path, properties: Properties) -> None:\n if len(path):\n vertices = iter(\n path.flattening(distance=self.max_flattening_distance)\n )\n prev = next(vertices)\n for vertex in vertices:\n self.draw_line(prev, vertex, properties)\n prev = vertex", "def visualize(self) -> None:\n if len(self._path_points) <= 0:\n raise RuntimeError(\"Can't visualise a path with no points.\")\n\n tip = self._mobile\n self._drawing_handle = sim.simAddDrawingObject(\n objectType=sim.sim_drawing_lines, size=3, duplicateTolerance=0,\n parentObjectHandle=-1, maxItemCount=99999,\n ambient_diffuse=[1, 0, 1])\n sim.simAddDrawingObjectItem(self._drawing_handle, None)\n init_pose = self._mobile.get_2d_pose()\n self._mobile.set_2d_pose(self._path_points[0][:3])\n prev_point = list(tip.get_position())\n\n for i in range(len(self._path_points)):\n points = self._path_points[i]\n 
self._mobile.set_2d_pose(points[:3])\n p = list(tip.get_position())\n sim.simAddDrawingObjectItem(self._drawing_handle, prev_point + p)\n prev_point = p\n\n # Set the arm back to the initial config\n self._mobile.set_2d_pose(init_pose[:3])", "def drawPaths(points, lines, height, lineWidth, pointRadius):\r\n\r\n\tlineArraySize = len(lines)\r\n\tpointArraySize = len(points)\r\n\tlineArrayItems = lineArraySize / 4\r\n\tpointArrayItems = pointArraySize / 2\r\n\r\n\r\n\tglLineWidth(lineWidth)\r\n\tglPointSize(pointRadius)\r\n\r\n\tglColor4f(0.0, 0.0, 1.0, 1.0)\r\n\tglNormal3f(0.0, 0.0, 1.0)\r\n\r\n\tglDisable(GL_TEXTURE_2D)\r\n\r\n\tglBegin(GL_LINES)\r\n\r\n#\tglLoadIdentity()\r\n\r\n\tfor i in range(lineArrayItems):\r\n\t\tglVertex3f(lines[i * 4], height - lines[i * 4 + 1], 0.1)\r\n\t\tglVertex3f(lines[i * 4 + 2], height - lines[i * 4 + 3], 0.1)\r\n\r\n\tglEnd()\r\n\r\n\tglBegin(GL_POINTS)\r\n\r\n#\tglLoadIdentity()\r\n\r\n\tfor i in range(pointArrayItems):\r\n\t\tglVertex3f(points[i * 2], height - points[i * 2 + 1], 0.11)\r\n\r\n\tglEnd()\r\n\r\n\tglEnable(GL_TEXTURE_2D)", "def DrawPath(*args, **kwargs):\n return _gdi_.GraphicsContext_DrawPath(*args, **kwargs)", "def _drawPath(self, path, layer=0, existing=[]):\n items = existing\n if not items:\n items.append(self._plt.plot(path.xs, path.ys, pen=_PT_PEN))\n else:\n items[-1].setData(path.xs, path.ys)\n Visualiser._setLayer(items, layer)\n return items", "def draw_path(path, colour='blue'):\n codes = [Path.LINETO] * len(path)\n codes[0] = Path.MOVETO\n fig = plt.figure()\n ax = fig.add_subplot(111)\n patch = patches.PathPatch(\n Path(path, codes),\n facecolor=colour,\n lw=2\n )\n x_min, y_min = path[0]\n x_max, y_max = path[0]\n\n for x, y in path:\n if x < x_min:\n x_min = x\n if x > x_max:\n x_max = x\n if y < y_min:\n y_min = y\n if y > y_max:\n y_max = y\n\n ax.add_patch(patch)\n ax.set_xlim(x_min - BORDER, x_max + BORDER)\n ax.set_ylim(y_min - BORDER, y_max + BORDER)\n plt.gca().set_aspect('equal', adjustable='box')\n plt.show()", "def test_path(self, x_path, y_path):\n\t\tplt.plot(x_path, y_path, 'bo')\n\t\tplt.plot(x_path, y_path, 'b-')\n\t\tplt.show()", "def print_path(window, source, dest):\n path = []\n curr_node = dest\n while curr_node.prev:\n path.append(curr_node)\n curr_node = curr_node.prev\n path.append(source)\n path = path[::-1] # reverse the path to display source->dest and not dest->source\n for node in path:\n if not node.is_colored:\n block = get_block_from_node(node)\n block.draw(window, PATH_COLOR)", "def draw():", "def print_path(self, path, marks = []):\n\n result = ''\n\n for y in range(1, self.height + 1):\n for x in range(1, self.width + 1):\n # Draw top line\n if (x, y - 1) in self.get_reachables(x, y):\n result += '+ '\n else: result += '+--'\n\n result += '+\\n'\n\n for x in range(1, self.width + 1):\n # Draw horizontal passage\n if (x - 1, y) in self.get_reachables(x, y):\n result += ' '\n else: result += '|'\n\n\n if (x, y) in path:\n if (x, y) in path[-1:]:\n result += '(X'\n else: result += ' x'\n elif (x, y) in marks:\n result += ' #'\n else: result += ' '\n\n result += '|\\n'\n\n if y == self.height:\n for x in range(1, self.width + 1):\n # Draw bottom line\n result += '+--'\n\n return result + '+'", "def __draw_path(\n self, x_path, y_path, opt_line, opt_marker,\n opt_colour, thickness=0.05):\n # Get colour\n colour = self.__get_colour_from_string(opt_colour)\n\n # For every point in the list, draw a line to the next one\n # (excluding last point)\n for point in range(0, len(x_path)):\n # Get point 1\n 
x1 = x_path[point]\n y1 = y_path[point]\n p1 = vector(x1, y1, 0)\n\n # If at end / only coordinate - draw a marker\n if point == len(x_path) - 1:\n create_marker(self.scene, x1, y1, opt_marker, colour)\n return\n\n # Get point 2\n x2 = x_path[point + 1]\n y2 = y_path[point + 1]\n p2 = vector(x2, y2, 0)\n\n if opt_line == '':\n # Only one marker to avoid double-ups\n create_marker(self.scene, x1, y1, opt_marker, colour)\n elif opt_line == '-':\n create_line(\n p1, p2, self.scene, colour=colour, thickness=thickness)\n # Only one marker to avoid double-ups\n create_marker(self.scene, x1, y1, opt_marker, colour)\n elif opt_line == '--':\n create_segmented_line(\n p1, p2, self.scene, 0.3, colour=colour,\n thickness=thickness)\n # Only one marker to avoid double-ups\n create_marker(self.scene, x1, y1, opt_marker, colour)\n elif opt_line == ':':\n create_segmented_line(\n p1, p2, self.scene, 0.05, colour=colour,\n thickness=thickness)\n # Only one marker to avoid double-ups\n create_marker(self.scene, x1, y1, opt_marker, colour)\n elif opt_line == '-.':\n raise NotImplementedError(\"Other line types not implemented\")\n else:\n raise ValueError(\"Invalid line type given\")", "def show_path(image, path):\r\n \r\n for i in range(len(path)-1):\r\n image = cv2.line(image,path[i],path[i+1],(150,150,150))\r\n return image", "def drawpath(self,obstacles):\n for i in obstacles:\n self.distance_map[i[0],i[1]]=44\n print(\"Distance map\")\n print(self.distance_map)\n for i in self.footprint:\n self.distance_map[i[0],i[1]]=88\n print(\"Evaluated path\")\n print(self.distance_map)", "def _drawRays(self):\r\n for rayID, ray in self.rayDict.items():\r\n ray.drawPath()", "def draw_path(self, path):\n palettes = pokemontools.map_gfx.read_palettes(self.config)\n map_image = pokemontools.map_gfx.draw_map(self.map_group_id, self.map_id, palettes, show_sprites=True, config=self.config)\n\n for coordinates in path:\n y = coordinates[0]\n x = coordinates[1]\n\n some_image = Image.new(\"RGBA\", (32, 32))\n draw = ImageDraw.Draw(some_image, \"RGBA\")\n draw.rectangle([(0, 0), (32, 32)], fill=(0, 0, 0, 127))\n\n target = [(x * 4, y * 4), ((x + 32) * 4, (y + 32) * 4)]\n\n map_image.paste(some_image, target, mask=some_image)\n\n return map_image", "def plot_path(self, current_path):\n full_path = current_path.copy()\n full_path.insert(0, self.root)\n\n path = Marker()\n id = 1\n\n # edge between nodes\n path = Marker()\n path.header.frame_id = \"map\"\n path.header.stamp = rospy.get_rostime()\n path.ns = \"markers\"\n path.id = 1\n path.type = path.LINE_STRIP\n path.action = path.ADD\n path.scale.x = self.rviz_tuning_plt\n path.color.a = 1.0\n path.color.r = 0.0\n path.color.g = 1.0\n path.color.b = 0.0\n\n path.lifetime = rospy.Duration()\n path.pose.orientation.w = 1.0\n\n for node in full_path:\n p1 = Point()\n p1.x = node.x\n p1.y = node.y\n p1.z = 0.03\n path.points.append(p1)\n\n self.pub_path.publish(path)", "def debug_draw(i, segment):\n segment = np.array(segment)\n # pylint: disable=import-outside-toplevel\n from pyiem.plot.use_agg import plt\n\n (fig, ax) = plt.subplots(1, 1)\n ax.plot(segment[:, 0], segment[:, 1], c=\"b\")\n ax.plot(CONUS[\"poly\"].exterior.xy[0], CONUS[\"poly\"].exterior.xy[1], c=\"r\")\n mydir = tempfile.gettempdir()\n LOG.warning(\"writting %s/%sdebugdraw.png\", mydir, i)\n fig.savefig(f\"{mydir}/{i}debugdraw.png\")\n return fig", "def draw_point_path(pnt_path: List[Point], color: C3F, point_size: float) -> None:\n n = len(pnt_path)\n if n > 0:\n if n > 1:\n vertices = [coord for pnt in 
pnt_path for coord in pnt.coords()]\n pyglet.graphics.draw(\n n,\n pyglet.gl.GL_LINE_STRIP,\n (GeoDrawer._VERTEX_MODE, vertices),\n (GeoDrawer._COLOR_MODE, color * n),\n )\n for pnt in pnt_path:\n GeoDrawer.draw_point(pnt, point_size, color)", "def draw_path(self, renderer, gc, path, affine, rgbFace=None): # noqa: N803\n self._symbol_cycle = None\n return super().draw_path(renderer, gc, path, affine, rgbFace) # noqa: N803", "def draw_path(self, renderer, gc, path, affine, rgbFace=None): # noqa: N803\n self._symbol_cycle = None\n return super().draw_path(renderer, gc, path, affine, rgbFace) # noqa: N803", "def draw_path(self, renderer, gc, path, affine, rgbFace=None): # noqa: N803\n self._symbol_cycle = None\n return super().draw_path(renderer, gc, path, affine, rgbFace) # noqa: N803", "def draw_path(map: np.ndarray, x_old: float, y_old: float, x_new: float, y_new: float, color: Tuple[int]) -> np.ndarray:\n x_old = int(round(x_old, 0))\n y_old = int(round(y_old, 0))\n x_new = int(round(x_new, 0))\n y_new = int(round(y_new, 0))\n\n cv2.line(map, (x_old, y_old), (x_new, y_new), color, 3)\n return map", "def draw(self):\n # s1 = ShowPoint(self.cnv, self.p1.xpt, self.p1.ypt)\n # s2 = ShowPoint(self.cnv, self.p2.xpt, self.p2.ypt)\n # s1.draw()\n # # s2.draw()\n self.cnv.create_line(self.p1.xpt, self.p1.ypt, self.p2.xpt, self.p2.ypt)", "def _defLine(self):\n self._dline=GPath(points = [0,100,GAME_WIDTH,100], linewidth = 1.5,\n linecolor = 'cyan')", "def draw(self):\n self.draw_occupied_cells()\n self.draw_open_cells()\n self.draw_edges()\n plt.xlabel(\"Red\")\n plt.ylabel(\"Black\")\n plt.title('Hex')\n self.camera.snap()", "def draw(self, viewer):\n self.path.crdmap = self.crdmap\n\n self.recalc(viewer)\n\n if len(self.path.points) > 0:\n self.path.draw(viewer)", "def show_path(self):\n\n node = self.goal\n\n while node.parent:\n node.parent.value = 1\n node = node.parent", "def create_path(self): # weights, path_data, show_path=True):\n\n # control variables\n max_distance_to_pf = np.linalg.norm(np.subtract(\n self.pf, np.sum([self.p0, self.d2*self.l2/2], axis=0)))\n current_position_index = [0, int(len(self.lines_list)/2)] # X, Y\n final_position_index = [\n len(self.lines_list[0]) - 1, int(len(self.lines_list)/2)] # X, Y\n points_displaced = 0\n max_fishing = 0\n\n # main path, which goes directly from initial to final points\n for i in range(len(self.lines_list[0])):\n self.x_list_main.append(\n self.lines_list[int(len(self.lines_list)/2)][i][\"position\"][0])\n self.y_list_main.append(\n self.lines_list[int(len(self.lines_list)/2)][i][\"position\"][1])\n self.fish_prob_list_main .append(\n self.lines_list[int(len(self.lines_list)/2)][i][\"fish\"])\n\n path_index = [[current_position_index[0], current_position_index[1]]]\n\n self.fish_prob_list.append(\n self.lines_list[current_position_index[1]][current_position_index[0]][\"fish\"])\n\n while current_position_index != final_position_index:\n score = -float('inf')\n max_score = -float('inf')\n a_max = 0\n b_max = 0\n for a in range(-1, 2, 1): # self.d2 values\n for b in range(0, 2, 1): # self.d1 values\n if a == 0 and b == 0:\n continue # we should always move\n elif [current_position_index[0] + b, current_position_index[1] + a] not in path_index:\n # we get the next point data from the self.lines_list variables\n try:\n values = self.lines_list[current_position_index[1] +\n a][current_position_index[0] + b]\n score = self.weights[\"fish\"] * values[\"fish\"] \\\n - self.weights[\"straight_line_distance\"] * values[\"distance_to_l1\"] / 
(self.l2/2) \\\n - self.weights[\"final_point_distance\"] * \\\n values[\"distance_to_pf\"] / max_distance_to_pf\n #+ self.weights[\"fuel\"] * values[\"fuel\"] + self.weights[\"area\"] * values[\"area\"]\n\n except IndexError:\n # Position not reachable\n continue\n\n if score > max_score:\n max_score = score\n max_fishing = values[\"fish\"]\n a_max = a\n b_max = b\n\n current_position_index[0] += b_max # X\n current_position_index[1] += a_max # Y\n path_index.append([current_position_index[0],\n current_position_index[1]]) # X Y\n\n self.fish_prob_list.append(max_fishing)\n\n points_displaced += 1\n\n step_filter = 3\n paths_coincide = True\n path_index_filtered = []\n index_to_insert = []\n counter = 0\n\n # filter points\n while True:\n\n if counter % step_filter == 0 or path_index[counter] == path_index[-1]:\n if len(path_index) - counter < step_filter and not path_index[counter] == path_index[-1]:\n step_filter = len(path_index) - counter\n\n try:\n index_to_insert = []\n paths_coincide = True\n for i in range(counter, counter+step_filter, 1):\n paths_coincide = (\n path_index[i][1] == path_index[i+1][1]) and paths_coincide\n index_to_insert.append(path_index[i])\n\n except Exception:\n paths_coincide = False\n\n if paths_coincide:\n for index in index_to_insert:\n path_index_filtered.append(index)\n\n for j in range(counter, counter+step_filter, 1):\n self.fish_prob_list_filtered.append(\n self.fish_prob_list[j])\n\n counter += step_filter\n\n else:\n path_index_filtered.append(path_index[counter])\n self.fish_prob_list_filtered.append(\n self.fish_prob_list[counter])\n counter += 1\n\n else:\n counter += 1\n\n if counter >= len(path_index):\n break\n\n # we pass from index to real positions\n for index in path_index:\n self.x_list.append(\n self.lines_list[index[1]][index[0]][\"position\"][0])\n self.y_list.append(\n self.lines_list[index[1]][index[0]][\"position\"][1])\n\n for index in path_index_filtered:\n self.x_list_filtered.append(\n self.lines_list[index[1]][index[0]][\"position\"][0])\n self.y_list_filtered.append(\n self.lines_list[index[1]][index[0]][\"position\"][1])\n\n return [[self.x_list_main, self.y_list_main, 'green'], [self.x_list, self.y_list, 'red'],\n [self.x_list_filtered, self.y_list_filtered, 'blue']], \\\n [self.fish_prob_list, self.fish_prob_list_filtered,\n self.fish_prob_list_main]", "def create_path(number_of_vertices, radius, thickness):\n\n global screen\n dist_apart = radius * 3\n\n for i in range(0, number_of_vertices):\n vtx_x = i*dist_apart + int((WINDOW_WIDTH - dist_apart * (number_of_vertices - 1)) / 2)\n vtx_y = int(WINDOW_HEIGHT / 2)\n\n vtx = {\"ID\": i,\n \"x\": vtx_x,\n \"y\": vtx_y,\n \"color\": \"WHITE\",\n \"adjacent\": [],\n }\n\n VERTICES.append(vtx);\n\n # Assign adjacency\n for i in range(0, number_of_vertices):\n if i is not number_of_vertices - 1:\n VERTICES[i][\"adjacent\"].append(VERTICES[i + 1][\"ID\"])\n VERTICES[i + 1][\"adjacent\"].append(VERTICES[i][\"ID\"])\n\n draw_graph(VERTICES, radius, thickness)", "def addPath(self, path=[]):\n if path:\n path = [\n self.transform_reverse.transformPoints(pts) for pts in path\n ]\n if self.trace:\n self.path.append(path)\n else:\n self.drawPath(path)", "def plot_path( polylines, mymap ):\n #remark : there is bug in lib pymaps : it change path....\n map( \\\n lambda path : mymap.addpath(list(path),\"#00FF00\"),\n polylines)", "def path_plot(robot_path, regions, obs):\n\n for robot, path in robot_path.items():\n # prefix path\n if len(path) == 1:\n continue\n x_pre = np.asarray([point[0] + 
0.5 for point in path])\n y_pre = np.asarray([point[1] + 0.5 for point in path])\n plt.quiver(x_pre[:-1], y_pre[:-1], x_pre[1:] - x_pre[:-1], y_pre[1:] - y_pre[:-1],\n color=\"#\" + ''.join([random.choice('0123456789ABCDEF') for j in range(6)]),\n scale_units='xy', angles='xy', scale=1, label='prefix path')\n\n plt.savefig('img/path.png', bbox_inches='tight', dpi=600)", "def plot_path(path):\n s = np.linspace(0, path.total_length, 1000, endpoint=False)\n twists = np.array(list(path.target_state(si) for si in s))\n print(twists.shape)\n plt.plot(twists[:,0], twists[:,1])\n plt.show()", "def visualize_path(start, end, distance, path, time_dij, time_ida, ida_terminated):\n \n # get all cities to be plotted to figure\n neighbors = []\n cities_to_figure = set()\n for city in path:\n cities_to_figure.add(city)\n for neighbor_tuple in adjlist[city]:\n neighbor = neighbor_tuple[0]\n if neighbor not in path:\n neighbors.append(neighbor)\n cities_to_figure.add(neighbor)\n for neighbor_of_neighbor_tuple in adjlist[neighbor]:\n neighbor_of_neighbor = neighbor_of_neighbor_tuple[0]\n cities_to_figure.add(neighbor_of_neighbor)\n # get min_x, max_x, min_y, max_y\n min_x, max_x, min_y, max_y = 100, 0, 100, 0\n for city in list(cities_to_figure):\n min_x, max_x, min_y, max_y = min(min_x, coordinates[city][0]), max(max_x, coordinates[city][0]), min(min_y, coordinates[city][1]), max(max_y, coordinates[city][1])\n \n # set size of window to be popped up to user\n height = 9.5\n width = 0.5 * (max_x-min_x) / (max_y-min_y) * height\n fig = plt.figure(figsize=(width, height))\n \n # visited cities and visualization\n xs, ys = [], []\n for city in path:\n x, y = coordinates[city][0], coordinates[city][1]\n xs.append(x)\n ys.append(y)\n plt.annotate(city, (x,y), textcoords=\"offset points\", xytext=(0,5), ha=\"center\")\n plt.scatter(xs, ys, s=50, color=\"blue\")\n plt.plot(xs, ys, color=\"blue\")\n \n # neighboring cities\n xs2, ys2 = [], []\n for neighbor in neighbors:\n if neighbor not in path:\n x, y = coordinates[neighbor][0], coordinates[neighbor][1]\n xs2.append(x)\n ys2.append(y)\n plt.annotate(neighbor, (x,y), textcoords=\"offset points\", xytext=(0,5), ha=\"center\")\n # neighbors of neighbors and visualization\n for neighbor in neighbors:\n for neighbor_of_neighbor_tuple in adjlist[neighbor]:\n neighbor_of_neighbor = neighbor_of_neighbor_tuple[0]\n x, y = coordinates[neighbor_of_neighbor][0], coordinates[neighbor_of_neighbor][1]\n if neighbor_of_neighbor not in path and neighbor_of_neighbor not in neighbors:\n xs2.append(x)\n ys2.append(y)\n plt.annotate(neighbor_of_neighbor, (x,y), textcoords=\"offset points\", xytext=(0,5), ha=\"center\")\n x2, y2 = coordinates[neighbor][0], coordinates[neighbor][1]\n plt.plot([x, x2], [y, y2], color=\"grey\", linestyle=\"dashed\")\n plt.scatter(xs2, ys2, s=5, color=\"grey\")\n\n # duration\n hours = int(distance // 1)\n minutes = int(round(distance % 1 * 60, 0))\n duration_string = str(hours) + \" hours \" + str(minutes) + \" minutes\"\n \n if ida_terminated:\n plt.title(\"Shortest path: \" + start + \" -> \" + end + \"\\n(\" + duration_string + \")\")\n else: # ida did not finish\n plt.title(\"Shortest path: \" + start + \" -> \" + end + \"\\n(\" + duration_string + \")\")\n \n plt.axis('off')\n plt.show()", "def draw_path(self, renderer, gc, path, affine, rgbFace=None): # noqa: N803\n # Set up a new graphics context for rendering the front effect; override the color\n gc0 = self._override_gc(renderer, gc, foreground=self.color)\n\n # Get the information we need for 
drawing along the path\n starts, offsets, angles = self._process_path(path, affine)\n\n # Figure out what segments the markers should be drawn upon and how\n # far within that segment the markers will appear.\n segment_indices, marker_offsets = self._get_marker_locations(offsets, renderer)\n\n # Draw the original path\n renderer.draw_path(gc0, path, affine, rgbFace) # noqa: N803\n\n # Need to account for the line width in order to properly draw symbols at line edge\n line_shift = renderer.points_to_pixels(gc.get_linewidth()) / 2\n\n # Loop over all the markers to draw\n for ind, marker_offset in zip(segment_indices, marker_offsets):\n sym_trans = self._get_symbol_transform(renderer, marker_offset, line_shift,\n angles[ind], starts[ind])\n renderer.draw_path(gc0, self._symbol, sym_trans,\n self.color if self.filled else None)\n\n gc0.restore()", "def draw_path(self, renderer, gc, tpath, affine, rgbFace): # noqa: N803\n # Do not modify the input! Use copy instead.\n gc0 = self._override_gc(renderer, gc, foreground=self._color)\n\n theta = -np.radians(self._angle)\n trans_matrix = np.array([[np.cos(theta), -np.sin(theta)],\n [np.sin(theta), np.cos(theta)]])\n\n # Convert spacing parameter to pixels.\n spacing_px = renderer.points_to_pixels(self._spacing)\n\n # Transform before evaluation because to_polygons works at resolution\n # of one -- assuming it is working in pixel space.\n transpath = affine.transform_path(tpath)\n\n # Evaluate path to straight line segments that can be used to\n # construct line ticks.\n polys = transpath.to_polygons(closed_only=False)\n\n for p in polys:\n x = p[:, 0]\n y = p[:, 1]\n\n # Can not interpolate points or draw line if only one point in\n # polyline.\n if x.size < 2:\n continue\n\n # Find distance between points on the line\n ds = np.hypot(x[1:] - x[:-1], y[1:] - y[:-1])\n\n # Build parametric coordinate along curve\n s = np.concatenate(([0.0], np.cumsum(ds)))\n s_total = s[-1]\n\n num = int(np.ceil(s_total / spacing_px)) - 1\n # Pick parameter values for ticks.\n s_tick = np.linspace(spacing_px / 2, s_total - spacing_px / 2, num)\n\n # Find points along the parameterized curve\n x_tick = np.interp(s_tick, s, x)\n y_tick = np.interp(s_tick, s, y)\n\n # Find unit vectors in local direction of curve\n delta_s = self._spacing * .001\n u = (np.interp(s_tick + delta_s, s, x) - x_tick) / delta_s\n v = (np.interp(s_tick + delta_s, s, y) - y_tick) / delta_s\n\n # Normalize slope into unit slope vector.\n n = np.hypot(u, v)\n mask = n == 0\n n[mask] = 1.0\n\n uv = np.array([u / n, v / n]).T\n uv[mask] = np.array([0, 0]).T\n\n # Rotate and scale unit vector into tick vector\n dxy1 = np.dot(uv[0::2], trans_matrix) * self._length * spacing_px\n dxy2 = np.dot(uv[1::2], trans_matrix.T) * self._length * spacing_px\n\n # Build tick endpoints\n x_end = np.zeros(num)\n y_end = np.zeros(num)\n x_end[0::2] = x_tick[0::2] + dxy1[:, 0]\n x_end[1::2] = x_tick[1::2] + dxy2[:, 0]\n y_end[0::2] = y_tick[0::2] + dxy1[:, 1]\n y_end[1::2] = y_tick[1::2] + dxy2[:, 1]\n\n # Interleave ticks to form Path vertices\n xyt = np.empty((num, 2), dtype=x_tick.dtype)\n xyt[:, 0] = x_end\n xyt[:, 1] = y_end\n\n # Build up vector of Path codes\n codes = np.concatenate([[mpath.Path.MOVETO], [mpath.Path.LINETO] * (len(xyt) - 1)])\n\n # Construct and draw resulting path\n h = mpath.Path(xyt, codes)\n\n # Transform back to data space during render\n renderer.draw_path(gc0, h, affine.inverted() + affine, rgbFace) # noqa: N803\n\n gc0.restore()", "def draw_path(self, renderer, gc, path, affine, 
rgbFace=None): # noqa: N803\n # Do not modify the input! Use copy instead.\n gc0 = renderer.new_gc()\n gc0.copy_properties(gc)\n\n gc0 = self._update_gc(gc0, self._gc)\n trans = affine + self._offset_transform(renderer)\n\n theta = -np.radians(self._angle)\n trans_matrix = np.array([[np.cos(theta), -np.sin(theta)],\n [np.sin(theta), np.cos(theta)]])\n\n # Convert spacing parameter to pixels.\n spacing_px = renderer.points_to_pixels(self._spacing)\n\n # Transform before evaluation because to_polygons works at resolution\n # of one -- assuming it is working in pixel space.\n transpath = affine.transform_path(path)\n\n # Evaluate path to straight line segments that can be used to\n # construct line scallops.\n polys = transpath.to_polygons(closed_only=False)\n\n for p in polys:\n x = p[:, 0]\n y = p[:, 1]\n\n # Can not interpolate points or draw line if only one point in\n # polyline.\n if x.size < 2:\n continue\n\n # Find distance between points on the line\n ds = np.hypot(x[1:] - x[:-1], y[1:] - y[:-1])\n\n # Build parametric coordinate along curve\n s = np.concatenate(([0.0], np.cumsum(ds)))\n s_total = s[-1]\n\n num = int(np.ceil(s_total / spacing_px)) - 1\n # Pick parameter values for scallops.\n s_tick = np.linspace(0, s_total, num)\n\n # Find points along the parameterized curve\n x_tick = np.interp(s_tick, s, x)\n y_tick = np.interp(s_tick, s, y)\n\n # Find unit vectors in local direction of curve\n delta_s = self._spacing * .001\n u = (np.interp(s_tick + delta_s, s, x) - x_tick) / delta_s\n v = (np.interp(s_tick + delta_s, s, y) - y_tick) / delta_s\n\n # Handle slope of end point\n if (x_tick[-1], y_tick[-1]) == (x_tick[0], y_tick[0]): # periodic\n u[-1] = u[0]\n v[-1] = v[0]\n else:\n u[-1] = u[-2]\n v[-1] = v[-2]\n\n # Normalize slope into unit slope vector.\n n = np.hypot(u, v)\n mask = n == 0\n n[mask] = 1.0\n\n uv = np.array([u / n, v / n]).T\n uv[mask] = np.array([0, 0]).T\n\n # Rotate and scale unit vector\n dxy = np.dot(uv, trans_matrix) * self._length * spacing_px\n\n # Build endpoints\n x_end = x_tick + dxy[:, 0]\n y_end = y_tick + dxy[:, 1]\n\n # Interleave ticks to form Path vertices\n xyt = np.empty((2 * num, 2), dtype=x_tick.dtype)\n xyt[0::2, 0] = x_tick\n xyt[1::2, 0] = x_end\n xyt[0::2, 1] = y_tick\n xyt[1::2, 1] = y_end\n\n # Build path vertices that will define control points of the bezier curves\n verts = []\n i = 0\n nverts = 0\n while i < len(xyt) - 2:\n verts.append(xyt[i, :])\n verts.append(xyt[i + 1, :])\n verts.append(xyt[i + 3, :])\n verts.append(xyt[i + 2, :])\n nverts += 1\n i += 2\n\n # Build up vector of Path codes\n codes = np.tile([mpath.Path.LINETO, mpath.Path.CURVE4,\n mpath.Path.CURVE4, mpath.Path.CURVE4], nverts)\n codes[0] = mpath.Path.MOVETO\n\n # Construct and draw resulting path\n h = mpath.Path(verts, codes)\n\n # Transform back to data space during render\n renderer.draw_path(gc0, h, affine.inverted() + trans, rgbFace)\n\n gc0.restore()", "def draw_point(self, p):\n length = 3\n self.set_line_width(0.1)\n self.set_source_rgba(0, 0, 1, 1)\n self.move_to(p.x + length, p.y)\n self.line_to(p.x - length, p.y)\n self.stroke()\n self.move_to(p.x, p.y + length)\n self.line_to(p.x, p.y - length)\n self.stroke()", "def draw_path(self, renderer, gc, path, affine, rgbFace=None): # noqa: N803\n # Set up a new graphics context for rendering the front effect; override the color\n gc0 = self._override_gc(renderer, gc, foreground=self.color)\n\n # Get the information we need for drawing along the path\n starts, offsets, angles = self._process_path(path, 
affine)\n\n # Figure out what segments the markers should be drawn upon, how\n # far within that segment the markers will appear, and the segment bounds.\n (segment_starts, segment_ends,\n segment_indices, marker_offsets) = self._get_path_locations(offsets, renderer)\n\n # Need to account for the line width in order to properly draw symbols at line edge\n line_shift = renderer.points_to_pixels(gc.get_linewidth()) / 2\n\n # Loop over all the segments to draw\n for start_path, end_path in zip(segment_starts, segment_ends):\n renderer.draw_path(gc0, mpath.Path(starts[start_path:end_path]),\n mtransforms.Affine2D(), None)\n\n # Loop over all the markers to draw\n for ind, marker_offset in zip(segment_indices, marker_offsets):\n sym_trans = self._get_symbol_transform(renderer, marker_offset, line_shift,\n angles[ind], starts[ind])\n\n renderer.draw_path(gc0, self._symbol, sym_trans, self.color)\n\n gc0.restore()", "def draw_path(self, renderer, gc, path, affine, rgbFace=None): # noqa: N803\n # Set up a new graphics context for rendering the front effect; override the color\n gc0 = self._override_gc(renderer, gc, foreground=self.color)\n\n # Get the information we need for drawing along the path\n starts, offsets, angles = self._process_path(path, affine)\n\n # Figure out what segments the markers should be drawn upon, how\n # far within that segment the markers will appear, and the segment bounds.\n (segment_starts, segment_ends,\n segment_indices, marker_offsets) = self._get_path_locations(offsets, renderer)\n\n # Need to account for the line width in order to properly draw symbols at line edge\n line_shift = renderer.points_to_pixels(gc.get_linewidth()) / 2\n\n # Loop over all the segments to draw\n for start_path, end_path in zip(segment_starts, segment_ends):\n renderer.draw_path(gc0, mpath.Path(starts[start_path:end_path]),\n mtransforms.Affine2D(), None)\n\n # Loop over all the markers to draw\n for ind, marker_offset in zip(segment_indices[::2], marker_offsets[::2]):\n sym_trans = self._get_symbol_transform(renderer, marker_offset, line_shift,\n angles[ind], starts[ind])\n\n renderer.draw_path(gc0, self._symbol, sym_trans, self.color)\n\n gc0.restore()", "def __draw_line(display, color, ball_pos, dx, dy):\n pygame.draw.line(display, color, ball_pos, (ball_pos[0] + dx, ball_pos[1] + dy), 2)", "def print_path(path, index):\r\n\r\n print(\"Printing trace for puzzle no. 
{0}\".format(index))\r\n print_puzzle(path[0][0])\r\n for i in range(1, len(path)):\r\n movement = get_move(path[i-1][1], path[i][1])\r\n\r\n moved_tile = get_value(path[i-1][0], path[i][1])\r\n print(i, \": move \", moved_tile, \" \", movement, sep=\"\")\r\n print_puzzle(path[i][0])\r\n print('')", "def do_draw_network(self, line):\n self.fibbing.root.lsdb.graph.draw(line)", "def draw_path(self, renderer, gc, path, affine, rgbFace=None): # noqa: N803\n gcs = [self._override_gc(renderer, gc, foreground=color) for color in self._colors]\n self._gc_cycle = itertools.cycle(gcs)\n self._symbol_cycle = itertools.cycle([self._symbol, self._symbol2])\n self._color_cycle = itertools.cycle(self._colors)\n self._segment_cycle = itertools.cycle(self._segment_colors)\n\n # Get the information we need for drawing along the path\n starts, offsets, angles = self._process_path(path, affine)\n\n # Figure out what segments the markers should be drawn upon, how\n # far within that segment the markers will appear, and the segment bounds.\n (segment_starts, segment_ends,\n segment_indices, marker_offsets) = self._get_path_locations(offsets, renderer)\n\n # Need to account for the line width in order to properly draw symbols at line edge\n line_shift = renderer.points_to_pixels(gc.get_linewidth()) / 2\n\n # Loop over all the markers to draw\n for ind, marker_offset in zip(segment_indices[::2], marker_offsets[::2]):\n sym_trans = self._get_symbol_transform(renderer, marker_offset, line_shift,\n angles[ind], starts[ind])\n gc = next(self._gc_cycle)\n color = next(self._color_cycle)\n symbol = next(self._symbol_cycle)\n\n renderer.draw_path(gc, symbol, sym_trans, color)\n\n line_shift *= -1\n\n for start_path, mid_path, end_path in zip(segment_starts,\n segment_indices,\n segment_ends):\n color1, color2 = next(self._segment_cycle)\n\n gcx = self._override_gc(renderer, gc, foreground=mcolors.to_rgb(color1))\n renderer.draw_path(gcx, mpath.Path(starts[start_path:mid_path]),\n mtransforms.Affine2D(), None)\n\n gcx = self._override_gc(renderer, gc, foreground=mcolors.to_rgb(color2))\n renderer.draw_path(gcx, mpath.Path(starts[mid_path:end_path]),\n mtransforms.Affine2D(), None)\n\n gcs[0].restore()", "def stroke(self, path, clr):\n pass", "def svg_draw_quick(svg_img, board, pix_ref):\n RDK.Render(False)\n count = 0\n for path in svg_img:\n count = count + 1\n # use the pixel reference to set the path color, set pixel width and copy as a reference\n pix_ref.Recolor(path.fill_color)\n if PIXELS_AS_OBJECTS:\n pix_ref.Copy()\n np = path.nPoints()\n print('drawing path %i/%i' % (count, len(svg_img)))\n for i in range(np):\n p_i = path.getPoint(i)\n v_i = path.getVector(i)\n\n # Reorient the pixel object along the path\n pt_pose = point2D_2_pose(p_i, v_i)\n \n # add the pixel geometry to the drawing board object, at the calculated pixel pose\n if PIXELS_AS_OBJECTS:\n board.Paste().setPose(pt_pose)\n else:\n board.AddGeometry(pix_ref, pt_pose)\n \n RDK.Render(True)", "def draw(self):", "def rectToPath(self,node):\n x = float(node['x'])\n y = float(node['y'])\n w = float(node['width'])\n h = float(node['height'])\n rx = 0\n ry = 0\n if 'rx' in node:\n rx = float(node['rx'])\n if 'ry' in node:\n ry = float(node['ry'])\n\n if rx==0 or ry ==0:\n d ='M %f,%f '%(x,y)\n d+='L %f,%f '%(x+w,y)\n d+='L %f,%f '%(x+w,y+h)\n d+='L %f,%f '%(x,y+h)\n d+='L %f,%f '%(x,y)\n else:\n d ='M %f,%f '%(x+rx,y)\n d+='L %f,%f '%(x+w-rx,y)\n d+='A %f,%f 0 0 1 %f,%f'%(rx,ry,x+w,y+ry)\n d+='L %f,%f '%(x+w,y+h-ry)\n d+='A %f,%f 0 0 1 
%f,%f'%(rx,ry,x+w-rx,y+h)\n d+='L %f,%f '%(x+rx,y+h)\n d+='A %f,%f 0 0 1 %f,%f'%(rx,ry,x,y+h-ry)\n d+='L %f,%f '%(x,y+ry)\n d+='A %f,%f 0 0 1 %f,%f'%(rx,ry,x+rx,y)\n\n return d", "def __printThePath(self, tile):\n print()\n print(\"Path is found. Initial tile: \" + str(self.startingPoint) + \", Goal tile: \" + str(self.goalPoint))\n print(\"Here is the path cost: \" + str(tile.cost) + \" and path is:\")\n print(tile.pathToTile[::-1])\n print()", "def draw_polyline(*points):\r\n global _canvas\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n #print(points)\r\n #print(len(points))\r\n newpoints = []\r\n for x in range(0, len(points), 2):\r\n #print(x)\r\n pt = Point(points[x], points[x+1])\r\n newpoints += [ pt ]\r\n #print(newpoints)\r\n path = Path(*newpoints)\r\n path.setBorderWidth(_current_line_thickness)\r\n path.setBorderColor(_current_color)\r\n _canvas.add(path)", "def draw(self, scene):\n scene.add(svg.Line(start=[self.position[0, 0], self.position[0, 1]], end=[self.position[1, 0], self.position[1, 1]], thickness=3))\n scene.add(svg.Line(start=[self.position[1, 0], self.position[1, 1]], end=[self.position[2, 0], self.position[2, 1]], thickness=3))\n\n scene.add(svg.Circle(center=self.position[0, :], radius=3, color='gray'))\n scene.add(svg.Circle(center=self.position[1, :], radius=3, color='gray'))\n\n # draw trajectory\n X = list(self.end_point_traj)\n for i in range(1, len(self.end_point_traj)):\n x1 = X[i-1]\n x2 = X[i]\n scene.add(svg.Line(start=x1, end=x2, thickness=1, color='red'))\n\n scene.add(svg.Circle(center=self.position[2, :], radius=3, color='blue'))", "def draw_line(x1, y1, x2, y2):\r\n #global _canvas\r\n #global _current_line_thickness\r\n #global _current_color\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n path = Path(Point(x1, y1), Point(x2, y2))\r\n path.setBorderWidth(_current_line_thickness)\r\n path.setBorderColor(_current_color)\r\n _canvas.add(path)", "def draw(self, screen):", "def draw(self):\n draw(self.graph)", "def draw(self):\n pass", "def draw(self):\n pass", "def draw(self):\n pass", "def draw(self):\n pass", "def draw_s(self):\r\n pen.down()\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(20)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(50)", "def drawSeg(self, seg, sfill=SFILL):\n x, y = seg.getStartPoint()\n X, Y = seg.getEndPoint()\n go = self.can.create_line(x, y, X, Y, width=3, fill=sfill)\n seg.addGraphicObject(go)", "def _draw_trajectory(self):\n if self.trajectory is None:\n return\n\n for state in self.trajectory:\n pygame.draw.circle(self.env_img, BLUE, (int(state[0]), int(state[1])), 2)", "def __drawSegment(self, p1, p2, color):\n pygame.draw.aaline(self.screen, color, p1, p2)", "def paint(self):\n x = []\n y = []\n plt.figure(figsize=(10, 5), facecolor=\"silver\")\n ax = plt.axes()\n for node in self.graph.nodes.values():\n x.append(node.get_pos()[0])\n y.append(node.get_pos()[1])\n ax.scatter(x, y, color=\"black\", s=50)\n xl = ax.get_xlim()[1] - ax.get_xlim()[0]\n yl = ax.get_ylim()[1] - ax.get_ylim()[0]\n for nd in self.graph.nodes.values():\n for ed in self.graph.all_out_edges_of_node(Node.get_key(nd)).keys():\n desti: Node = self.graph.get_node(ed)\n destx = desti.get_pos()[0] - nd.get_pos()[0]\n desty = desti.get_pos()[1] - nd.get_pos()[1]\n ax.arrow(nd.get_pos()[0], 
nd.get_pos()[1], destx, desty, head_width=xl * 0.007,\n length_includes_head=True,\n head_length=yl * 0.02, width=xl * 0.0001 * yl, color='grey')\n plt.title(\"Your graph!\")\n plt.show()", "def path_plotter(self, res):\n # define edgepoint of the plot\n x_start = np.min(self.trans_path_x) - self.p * res\n x_end = np.max(self.trans_path_x) + (self.f + self.p) * res\n y_start = np.min(self.trans_path_y) - self.p * res\n y_end = np.max(self.trans_path_y) + (self.f + self.p) * res\n\n # define length of arrays\n x_len = int((x_end - x_start) / res)\n y_len = int((y_end - y_start) / res)\n\n # define x- and y-axis\n self.x = np.arange(x_start, x_end, res)\n self.y = np.arange(y_start, y_end, res)\n\n # define matrix that will be plotted\n self.meas_path = np.ones((y_len, x_len))\n\n # fill the matrix with the measured frames\n for k, frame in enumerate(self.trans_frames):\n start = ((self.trans_frame_start[k][0] - x_start) / res,\n (self.trans_frame_start[k][1] - y_start) / res)\n end = ((self.trans_frame_start[k][0] + (2 * self.p + self.f) * res - x_start) / res,\n (self.trans_frame_start[k][1] + (2 * self.p + self.f) * res - y_start) / res)\n # start = (int((self.trans_path_x[k] - self.p * res - x_start) / res),\n # int((self.trans_path_y[k] - self.p * res - y_start) / res))\n # end = (int((self.trans_path_x[k] + (self.p + self.f) * res - x_start) / res),\n # int((self.trans_path_y[k] + (self.p + self.f) * res - y_start) / res))\n self.meas_path[start[1]:end[1], start[0]:end[0]] = frame\n\n # Plot the path\n fig, ax = plt.subplots(1)\n ax.pcolormesh(self.x, self.y, self.meas_path)\n ax.plot(self.trans_path_x, self.trans_path_y, color='red')\n for k in range(len(self.trans_frames)):\n width = self.f * res\n rect = patches.Rectangle((self.trans_path_x[k], self.trans_path_y[k]), width, width,\n linewidth=1, edgecolor='black', facecolor='none')\n ax.add_patch(rect)\n plt.show()", "def draw_path(self, renderer, gc, path, affine, rgbFace=None): # noqa: N803\n gcs = [self._override_gc(renderer, gc, foreground=color) for color in self._colors]\n self._gc_cycle = itertools.cycle(gcs)\n self._symbol_cycle = itertools.cycle([self._symbol, self._symbol2])\n self._color_cycle = itertools.cycle(self._colors)\n\n # Get the information we need for drawing along the path\n starts, offsets, angles = self._process_path(path, affine)\n\n # Figure out what segments the markers should be drawn upon, how\n # far within that segment the markers will appear, and the segment bounds.\n (segment_starts, segment_ends,\n segment_indices, marker_offsets) = self._get_path_locations(offsets, renderer)\n\n # Need to account for the line width in order to properly draw symbols at line edge\n line_shift = renderer.points_to_pixels(gc.get_linewidth()) / 2\n\n # Loop over all the markers to draw\n for ind, start_path, end_path, marker_offset in zip(segment_indices, segment_starts,\n segment_ends, marker_offsets):\n sym_trans = self._get_symbol_transform(renderer, marker_offset, line_shift,\n angles[ind], starts[ind])\n gc = next(self._gc_cycle)\n color = next(self._color_cycle)\n symbol = next(self._symbol_cycle)\n\n renderer.draw_path(gc, symbol, sym_trans, color)\n renderer.draw_path(gc, mpath.Path(starts[start_path:end_path]),\n mtransforms.Affine2D(), None)\n line_shift *= -1\n\n gcs[0].restore()", "def draw_path(self, renderer, gc, path, affine, rgbFace=None): # noqa: N803\n gcs = [self._override_gc(renderer, gc, foreground=color) for color in self._colors]\n self._gc_cycle = itertools.cycle(gcs)\n self._symbol_cycle = 
itertools.cycle([self._symbol, self._symbol2])\n self._color_cycle = itertools.cycle(self._colors)\n\n # Get the information we need for drawing along the path\n starts, offsets, angles = self._process_path(path, affine)\n\n # Figure out what segments the markers should be drawn upon and how\n # far within that segment the markers will appear.\n segment_indices, marker_offsets = self._get_marker_locations(offsets, renderer)\n end_path_inds = self._get_path_segment_ends(offsets, renderer)\n start_path_inds = np.concatenate([[0], end_path_inds[:-1]])\n\n # Need to account for the line width in order to properly draw symbols at line edge\n line_shift = -renderer.points_to_pixels(gc.get_linewidth()) / 2\n\n # Loop over all the markers to draw\n for ind, start_path, end_path, marker_offset in zip(segment_indices, start_path_inds,\n end_path_inds, marker_offsets):\n sym_trans = self._get_symbol_transform(renderer, marker_offset, line_shift,\n angles[ind], starts[ind])\n gc = next(self._gc_cycle)\n color = next(self._color_cycle)\n symbol = next(self._symbol_cycle)\n\n renderer.draw_path(gc, symbol, sym_trans, color)\n renderer.draw_path(gc, mpath.Path(starts[start_path:end_path]),\n mtransforms.Affine2D(), None)\n line_shift *= -1\n\n gcs[0].restore()", "def print_path(self, d, parent, s, t):\n idxs = [t]\n while idxs[-1]!=s:\n idxs.append(parent[idxs[-1]])\n idxs.reverse()\n print('[{:g}]'.format(d[t])+' '+'-->'.join([str(self.vertices[i]) for i in idxs]))", "def show_paths(self):\r\n print(\"------------------------\")\r\n print(\"######### ALL PATHS #########\")\r\n\r\n if self.size == 0:\r\n print(\"Empty tree!\")\r\n else:\r\n for i in range(1, self.root.size_tree + 1):\r\n node = self.select(i)\r\n if node.size_tree == 1:\r\n print(\"|\" + self.str_single_path(node))\r\n\r\n print(\"------------------------\")", "def display_debug(self, screen: pygame.Surface):\n\t\tfor p1, p2 in self.__calculate_points():\n\t\t\tpygame.draw.line(screen, Color(255).get(), p1.get_int(), p2.get_int(), 2)\n\t\t\tpygame.draw.circle(screen, Color(255, 0, 0).get(), p1.get_int(), 2)\n\n\t\tpygame.draw.circle(screen, Color(0, 255, 0).get(), self.__target.get_int(), 2)", "def _render(self, gc, points):\n with gc:\n gc.set_antialias(True)\n self._draw_default_axes(gc)\n self._draw_default_grid(gc)\n if len(points)>0:\n gc.clip_to_rect(self.x, self.y, self.width, self.height)\n gc.set_stroke_color(self.color_)\n gc.set_line_width(self.line_width)\n gc.set_line_dash(self.line_style_)\n\n gc.begin_path()\n gc.lines(points)\n gc.stroke_path()\n\n return", "def make_painter_path(self):\n path = QPainterPath()\n points = self._points\n if points:\n point = points[0]\n path.moveTo(point[0], point[1])\n for i in range(1, len(self._points)):\n point = points[i]\n path.lineTo(point[0], point[1])\n path.closeSubpath()\n return path;", "def draw(self, view, path, style, status):\n\n # Get Object\n self.status = status\n self.view = view\n if not self.thisObj:\n self.thisObj = self.thisMObject()\n\n # Get Variable\n self.getAllAttribute()\n\n # Refresh\n if not self.firstRefresh or self.forceRefresh:\n \n self.vList_id = glFT.glGenLists(1)\n self.firstRefresh = True\n\n if self.shape_type == 0:\n self.squarePrimitive()\n elif self.shape_type == 1:\n self.circlePrimitive()\n\n # Line Poly\n self.getColor(\"polygon\")\n\n # Begin\n view.beginGL()\n glFT.glPushAttrib(OpenMayaRender.MGL_ALL_ATTRIB_BITS)\n\n # Start draw\n if self.xRay:\n glFT.glDisable(OpenMayaRender.MGL_DEPTH_TEST)\n\n glFT.glPushMatrix()\n if self.billBoard: \n 
self.setBillboardMatrix()\n\n # self.setRotation()\n self.setLocalTransform()\n\n glFT.glEnable(OpenMayaRender.MGL_BLEND)\n \n glFT.glLineWidth(self.edge_size)\n glFT.glCallList(self.vList_id )\n\n if self.billBoard:\n glFT.glPopMatrix()\n\n # End draw\n glFT.glPopAttrib()\n view.endGL()", "def draw(self, screen):\n for branch_points in self.branches:\n pygame.draw.polygon(screen, self.branch_color, branch_points)\n for bottom_points in self.bottom:\n pygame.draw.polygon(screen, self.bottom_color, bottom_points)", "def path(self):\n from matplotlib.path import Path\n codes=[Path.MOVETO]\n for i_code in range(1,len(self.x)-1):\n if self.path_order == 3:\n codes.append(Path.CURVE4)\n elif self.path_order == 2:\n codes.append(Path.CURVE3)\n elif self.path_order == 1:\n codes.append(Path.LINETO)\n else:\n raise ValueError('Polygon.path_order cannot be higher than 3. Returning...')\n\n if self.path_order == 3 or self.path_order == 2:\n codes.append(Path.CURVE3)\n elif self.path_order == 1:\n codes.append(Path.LINETO)\n\n codes.append(Path.CLOSEPOLY)\n\n xy_looped=np.zeros([len(self.x)+1,2])\n xy_looped[0:-1,:]=np.asarray([self.x, self.y]).transpose()\n xy_looped[-1,:]=[self.x[0], self.y[0]]\n\n return Path(xy_looped,codes)", "def draw(self, img_path=None):\n fig, ax = plt.subplots(figsize=self.figsize)\n\n # Set the axis limits\n plt.xlim(self.xlim)\n plt.ylim(self.ylim)\n\n # Draw the nodes\n for node in self.nodes:\n node.add_circle(ax)\n\n # Add the transitions\n for i in range(self.M.shape[0]):\n for j in range(self.M.shape[1]):\n # self loops\n # if i == j:\n if (i == j) and (self.M[i,j] != 0):\n # Loop direction\n if self.nodes[i].y >= 0:\n self.nodes[i].add_self_loop(ax, prob = self.M[i,j], direction='up')\n else:\n self.nodes[i].add_self_loop(ax, prob = self.M[i,j], direction='down')\n # directed arrows\n elif self.M[i,j] > 0:\n self.add_arrow(ax, self.nodes[i], self.nodes[j], prob = self.M[i,j])\n\n plt.axis('off')\n # Save the image to disk?\n if img_path:\n plt.savefig(img_path)\n plt.show()", "def draw_on_world(self, world):\n for index, wp in enumerate(self.waypoints):\n # Adds 0.5 to z to ensure that the point is above the road surface.\n loc = (wp.location +\n pylot.utils.Location(0, 0, 0.5)).as_simulator_location()\n world.debug.draw_point(loc, size=0.1, life_time=DEFAULT_VIS_TIME)\n # if self.road_options and index < len(self.road_options):\n # world.debug.draw_string(loc,\n # str(self.road_options[index]),\n # life_time=DEFAULT_VIS_TIME)", "def draw_line(self, coords, smooth=False, **options):\n # NOTE: Outline does not work because uses paths instead of normal line method.\n # TODO: Add volume param, containing a list of linewidths same length as line\n # or as a function that calculates the width at each node\n # Result is a flow line with varying thickness at each node\n # Have to calculate left/right xy at each node, and use symbol curveto()\n # Easy and really cool...DO IT!\n options = self._check_options(options)\n \n if not hasattr(coords[0], \"__iter__\"):\n coords = _grouper(coords, 2)\n else: coords = (point for point in coords)\n \n # get drawing tools from options\n args = []\n if options[\"fillcolor\"]:\n pen = aggdraw.Pen(options[\"fillcolor\"], options[\"fillsize\"])\n args.append(pen)\n\n if smooth:\n\n # Note: Creation of the aggdraw.Symbol object here can be\n # very slow for long lines; Path is much faster but due\n # to a bug it does not correctly render curves, hence the use\n # of Symbol\n \n pathstring = \"\"\n \n # begin\n coords = _pairwise(coords)\n 
(startx,starty),(endx,endy) = next(coords)\n pathstring += \" M%s,%s\" %(startx, starty)\n \n # draw straight line to first line midpoint\n midx,midy = (endx + startx) / 2.0, (endy + starty) / 2.0\n pathstring += \" L%s,%s\" %(midx, midy)\n oldmidx,oldmidy = midx,midy\n \n # for each line\n for line in coords:\n # curve from midpoint of first to midpoint of second\n (startx,starty),(endx,endy) = line\n midx,midy = (endx + startx) / 2.0, (endy + starty) / 2.0\n pathstring += \" Q%s,%s,%s,%s\" %(startx, starty, midx, midy)\n oldmidx,oldmidy = midx,midy\n \n # draw straight line to endpoint of last line\n pathstring += \" L%s,%s\" %(endx, endy)\n\n # make into symbol object\n symbol = aggdraw.Symbol(pathstring)\n\n # draw the constructed symbol\n self.drawer.symbol((0,0), symbol, *args)\n\n else:\n\n path = aggdraw.Path()\n \n # begin\n startx,starty = next(coords)\n path.moveto(startx, starty)\n \n # connect to each successive point\n for nextx,nexty in coords:\n path.lineto(nextx, nexty)\n\n # draw the constructed path\n self.drawer.path((0,0), path, *args)", "def draw(self):\n\t\tpass", "def draw_t(self):\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.up()\r\n pen.forward(20)\r\n pen.down()\r\n pen.back(40)\r\n pen.up()\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(50)", "def draw_step(ax, line, sizes):\n x1, y1, x2, y2 = line\n\n # Clear & Resize\n ax.cla()\n size = np.sum(sizes) + 1\n g.axis([-size, size, -size, size])\n g.autoscale(False)\n\n # Plot step\n ax.plot([0, x1, x2], [0, y1, y2], lw=2, c='k')\n ax.add_patch(Circle((0, 0), 0.05, fc='k', zorder=10))\n ax.add_patch(Circle((x1, y1), 0.08, fc='b', ec='b', zorder=10))\n ax.add_patch(Circle((x2, y2), 0.08, fc='r', ec='r', zorder=10))", "def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath, mtext=None):\n writer = self.writer\n\n writer.comment(s)\n\n glyph_map=self._glyph_map\n\n text2path = self._text2path\n color = rgb2hex(gc.get_rgb())\n fontsize = prop.get_size_in_points()\n\n style = {}\n if color != '#000000':\n style['fill'] = color\n if gc.get_alpha() != 1.0:\n style['opacity'] = short_float_fmt(gc.get_alpha())\n\n if not ismath:\n font = text2path._get_font(prop)\n _glyphs = text2path.get_glyphs_with_font(\n font, s, glyph_map=glyph_map, return_new_glyphs_only=True)\n glyph_info, glyph_map_new, rects = _glyphs\n\n if glyph_map_new:\n writer.start('defs')\n for char_id, glyph_path in six.iteritems(glyph_map_new):\n path = Path(*glyph_path)\n path_data = self._convert_path(path, simplify=False)\n writer.element('path', id=char_id, d=path_data)\n writer.end('defs')\n\n glyph_map.update(glyph_map_new)\n\n attrib = {}\n attrib['style'] = generate_css(style)\n font_scale = fontsize / text2path.FONT_SCALE\n attrib['transform'] = generate_transform([\n ('translate', (x, y)),\n ('rotate', (-angle,)),\n ('scale', (font_scale, -font_scale))])\n\n writer.start('g', attrib=attrib)\n for glyph_id, xposition, yposition, scale in glyph_info:\n attrib={'xlink:href': '#%s' % glyph_id}\n if xposition != 0.0:\n attrib['x'] = short_float_fmt(xposition)\n if yposition != 0.0:\n attrib['y'] = short_float_fmt(yposition)\n writer.element(\n 'use',\n attrib=attrib)\n\n writer.end('g')\n else:\n if ismath == \"TeX\":\n _glyphs = text2path.get_glyphs_tex(prop, s, glyph_map=glyph_map,\n return_new_glyphs_only=True)\n else:\n _glyphs = text2path.get_glyphs_mathtext(prop, s, glyph_map=glyph_map,\n return_new_glyphs_only=True)\n\n glyph_info, glyph_map_new, rects = 
_glyphs\n\n # we store the character glyphs w/o flipping. Instead, the\n # coordinate will be flipped when this characters are\n # used.\n if glyph_map_new:\n writer.start('defs')\n for char_id, glyph_path in six.iteritems(glyph_map_new):\n char_id = self._adjust_char_id(char_id)\n # Some characters are blank\n if not len(glyph_path[0]):\n path_data = \"\"\n else:\n path = Path(*glyph_path)\n path_data = self._convert_path(path, simplify=False)\n writer.element('path', id=char_id, d=path_data)\n writer.end('defs')\n\n glyph_map.update(glyph_map_new)\n\n attrib = {}\n font_scale = fontsize / text2path.FONT_SCALE\n attrib['style'] = generate_css(style)\n attrib['transform'] = generate_transform([\n ('translate', (x, y)),\n ('rotate', (-angle,)),\n ('scale', (font_scale, -font_scale))])\n\n writer.start('g', attrib=attrib)\n for char_id, xposition, yposition, scale in glyph_info:\n char_id = self._adjust_char_id(char_id)\n\n writer.element(\n 'use',\n transform=generate_transform([\n ('translate', (xposition, yposition)),\n ('scale', (scale,)),\n ]),\n attrib={'xlink:href': '#%s' % char_id})\n\n for verts, codes in rects:\n path = Path(verts, codes)\n path_data = self._convert_path(path, simplify=False)\n writer.element('path', d=path_data)\n\n writer.end('g')", "def draw_routes(self):\n self.vis.draw_routes()", "def draw(self):\n pt = self.getPoint() # Centre of prism\n\n # Form top,left,right corners\n top = Vector2d(pt.z, pt.y + self.height/2)\n d = self.height*math.tan(self.angle/2)\n left = Vector2d(pt.z - d , pt.y - self.height/2)\n right = Vector2d(pt.z + d, pt.y - self.height/2)\n\n\n top.rotate(self.tilt)\n left.rotate(self.tilt)\n right.rotate(self.tilt)\n\n # Plot them out with plt.plot\n plot([top[0],left[0],right[0],top[0]],[top[1],left[1],right[1],top[1]],\"k\",lw=2.0)", "def draw_graph(graph, start, goal, path=[], save_file=None):\n explored = graph.get_explored_nodes()\n node_pos = {n: graph.nodes[n]['pos'] for n in graph.nodes.keys()}\n edge_labels = {}\n for edge in graph.edges():\n edge_labels[edge] = graph[edge[0]][edge[1]]['weight']\n\n labels = {}\n for node in graph:\n labels[node] = node\n\n nx.draw_networkx_nodes(graph, node_pos, node_color='gray') #, nodelist=romania.nodes, node_color='w', node_size=500)\n nx.draw_networkx_edges(graph, node_pos, style='dashed')\n if len(explored) > 0:\n print(\"Explored = \"+str(explored))\n nx.draw_networkx_nodes(graph, node_pos, nodelist=explored, node_color='r')\n\n if len(path) > 0:\n nx.draw_networkx_nodes(graph, node_pos, nodelist= path, node_color='y')\n edgelist = []\n for i in range(1,len(path)):\n edgelist.append((path[i - 1], path[i]))\n nx.draw_networkx_edges(graph, node_pos, edgelist, edge_color='b', width=3)\n nx.draw_networkx_nodes(graph, node_pos, nodelist=[start, goal], node_color='g')\n\n\n\n nx.draw_networkx_labels(graph, node_pos, labels)\n nx.draw_networkx_edge_labels(graph, node_pos, edge_labels, font_size=8)\n\n plt.axis('off')\n plt.show() # display\n if save_file is not None:\n plt.savefig(save_file) # save as png", "def draw_trace(self, trace):\n pts = [xform.chain(p) for p in (trace.p1, trace.p2)]\n self.canvas.line([(p.x, p.y) for p in pts], fill=colour)", "def path(game_id: int, save: bool = True):\n cfg = GameConfig()\n game = get_game(i=game_id) # Dummy game\n params = game.game_params()\n path = params[D_PATH]\n a_star = params[D_A_STAR]\n \n # Define the function\n def get_score(p):\n \"\"\"Get a score for the given path-position.\"\"\"\n temp = path[round(p[0], 1), round(p[1], 1)] / a_star\n return 
(clip(1 - temp, a_min=0, a_max=1) + clip(1 - temp, a_min=0, a_max=1) ** 2) / 2\n \n # Create the figure\n score = dict()\n for x in range(0, cfg.x_axis * 10 + 1):\n for y in range(0, cfg.x_axis * 10 + 1):\n score[x / 10, y / 10] = get_score((x / 10, y / 10))\n \n # Create the figure\n def fill(x1, x2, y1, y2):\n c = clr.to_hex([1, score[(x2, y2)], 0])\n plt.fill([x1, x1, x2, x2], [y1, y2, y2, y1], c)\n \n fig, ax = plt.subplots()\n divider = make_axes_locatable(ax)\n game.get_blueprint(ax)\n for x in range(0, 140):\n for y in range(0, 140):\n fill(round(x / 10, 1), round((x + 1) / 10, 1), round(y / 10, 1), round((y + 1) / 10, 1))\n game.get_blueprint(ax)\n plt.title(f\"Path-score by position for game {game.id:05d}\")\n cax = divider.append_axes('right', size='5%', pad=0.05) # Create the colorbar\n norm = mpl.colors.Normalize(vmin=0, vmax=1)\n data = np.ones((256, 3))\n data[:, 1] = np.linspace(0.33, 1, 256)\n data[:, 2] = 0\n mpl.colorbar.ColorbarBase(cax, cmap=clr.ListedColormap(data), norm=norm, orientation='vertical')\n plt.tight_layout()\n if save: plt.savefig(f'population/utils/visualizing/images/path_fitness_game_{game.id:05d}.png')\n plt.show()\n plt.close()", "def draw(self, canvas):\n super().draw(canvas, self.__path)", "def path_plot(self, path=[90, 90], num_points=100, normalization=1.0, latex=False):\n from matplotlib import pyplot as plt\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n x_values = []\n tick_label = []\n\n # Plot the first path\n end = path[0]*np.pi/180.0\n theta = np.linspace(0.0, end, num_points).tolist()\n phi = 0.0\n counter = 0\n gamma = []\n angles = []\n tick_label.append(0)\n for t in theta:\n x_values.append(counter)\n gamma.append(self.eval(t, phi))\n angles.append((t, phi))\n counter += 1\n\n # Plot second path\n theta = end\n end = path[1]*np.pi/180.0\n phi = np.linspace(0.0, end, num_points).tolist()\n counter -= 1\n tick_label.append(counter)\n for p in phi:\n x_values.append(counter)\n gamma.append(self.eval(theta, p))\n angles.append((theta, p))\n counter += 1\n\n # Plot third path (back to origin)\n theta = np.linspace(0.0, theta, num_points)[::-1]\n theta = list(theta)\n phi = end\n counter -= 1\n tick_label.append(counter)\n for t in theta:\n x_values.append(counter)\n gamma.append(self.eval(t, phi))\n counter += 1\n angles.append((t, phi))\n tick_label.append(counter-1)\n\n gamma = np.array(gamma)*normalization\n ax.plot(x_values, gamma)\n if latex:\n ax.set_ylabel(r\"Surface tension (mJ/\\$m^2\\$\")\n else:\n ax.set_ylabel(\"Surface tension (mJ/$m^2$\")\n ax.set_xticklabels([])\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n ax.set_xticks(tick_label)\n ax.set_xticklabels([(0, 0), (path[0], 0), (path[0], path[1]), (0, path[1])])\n return fig", "def draw(self, shape):\n shape.draw(shader=self.shader)", "def FillPath(*args, **kwargs):\n return _gdi_.GraphicsContext_FillPath(*args, **kwargs)", "def draw(self):\n #for (x, y) in self.coords:\n # pyxel.rect(\n # (x + self.x) * 4,\n # (y + self.y) * 4,\n # (x + self.x) * 4 + 3,\n # (y + self.y) * 4 + 3,\n # self.color)", "def show_path_2D(start, end, coordinates, polygons, clear = True):\n global L, N, delta_t\n\n # start interactive mode\n plt.ion()\n\n # crete eempty figure on which data will go and first subplot\n fig = plt.figure()\n\n # get into the correct time step\n for time_step in range(start, end):\n # list of colours used for animation\n colours = cm.rainbow(np.linspace(0, 1, N))\n\n # loop over each particle and colour\n for i in range(N):\n # 
plot x, y poistion of particle in a given colour and set axis to size of box\n plt.scatter(coordinates[time_step][i][0], coordinates[time_step][i][1], s = 1, color = 'r')\n\n # plot the object\n if i < M:\n polygon = np.array(polygons[time_step][i])\n # get the points of the polygon to plot it\n x, y = polygon.T\n\n # print(x, y)\n\n x = np.append(x, x[0])\n y = np.append(y, y[0])\n\n # print(x, y)\n\n # plot the polygon\n plt.plot(x , y)\n # plt.scatter(polygons_com[time_step][i][0], polygons_com[time_step][i][1], s = 5, color = 'g')\n\n if bound_cond == True:\n plt.axis([0, L, 0, L])\n plt.axis([0, L, 0, L])\n # plt.axis([-L*2, L*2, -L*2, L*2])\n\n # show graph\n plt.show()\n plt.pause(time_pause)\n\n # decide if you want to clear\n if clear == True:\n plt.clf()\n\n return None" ]
[ "0.8075153", "0.7686856", "0.7620524", "0.75897986", "0.7136921", "0.71321875", "0.7040748", "0.6928706", "0.6872046", "0.6816004", "0.6803802", "0.6753854", "0.6660338", "0.6658249", "0.66199154", "0.64800495", "0.64625716", "0.6392342", "0.6385967", "0.6356068", "0.63288015", "0.6320881", "0.62962997", "0.6234653", "0.6219322", "0.62048525", "0.61875755", "0.61875755", "0.61875755", "0.61710227", "0.6163704", "0.61540806", "0.6125954", "0.6120073", "0.6099254", "0.6097093", "0.6064618", "0.6064352", "0.60211617", "0.6017243", "0.59948736", "0.5986008", "0.5981082", "0.59804267", "0.59592736", "0.59472114", "0.5944904", "0.5944316", "0.5941025", "0.5940607", "0.5934867", "0.59256804", "0.59220076", "0.5908705", "0.5890821", "0.58687586", "0.58414614", "0.5841", "0.5840332", "0.5839733", "0.5830361", "0.5825941", "0.5819979", "0.5819979", "0.5819979", "0.5819979", "0.5817945", "0.57797533", "0.57749915", "0.5771692", "0.57681286", "0.5749014", "0.5743595", "0.57333213", "0.573199", "0.57298476", "0.5728916", "0.57253766", "0.5718506", "0.5717849", "0.5714237", "0.5713676", "0.5712443", "0.5711108", "0.5705756", "0.57016945", "0.5696848", "0.5696832", "0.569633", "0.56924295", "0.5690808", "0.5682188", "0.5679263", "0.5677316", "0.5676397", "0.56739366", "0.565552", "0.5651508", "0.56513417", "0.56507814" ]
0.7506927
4
All the logic to move, and the game logic goes here. Normally, you'll call update() on the sprite lists that need it.
def update(self, delta_time): # Call draw() on all your sprite lists below game = self.game if game.exit: arcade.close_window() if not game.is_running: return # if level has changed redraw walls if self.game.level != self.last_level: self.reset_level() self.last_level = self.game.level if not game.update_game(delta_time): return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_logic(self):\n if not self.game_over:\n # Move all the sprites\n self.all_sprites_list.update()", "def update(self):\n self.moving_sprites.update() \n self.static_sprites.update()\n self.camera.update(self.player)", "def update(self):\r\n if self.able_to_move:\r\n self.pix_pos += self.direction*self.speed\r\n if self.time_to_move():\r\n if self.stored_direction != None:\r\n self.direction = self.stored_direction\r\n self.able_to_move = self.can_move()\r\n # calls to the next function in order to check that the player is within bounds \r\n\r\n self.grid_pos[0] = (self.pix_pos[0]-TOP_BOTTOM_BUFFER +\r\n self.app.cell_width//2)//self.app.cell_width+1\r\n self.grid_pos[1] = (self.pix_pos[1]-TOP_BOTTOM_BUFFER +\r\n self.app.cell_height//2)//self.app.cell_height+1\r\n # keep track of where the player is currently to the grid \r\n\r\n if self.on_coin():\r\n self.eat_coin()\r\n # removes the coin once the player is over the tile\r\n\r\n if self.on_fruit():\r\n self.eat_fruit()\r\n # removes the fruit once the player is over the tile\r", "def update(self):\n self.syncSpriteCoordinates()\n self.moveBasedOnCurrentMomentum()\n #self.decelerate()\n self.checkCanvasBoundsAndWrap()", "def update():\n move()\n check_collision()", "def update(self):\n \n # Move left/right\n self.rect.x += self.change_x\n \n # See if we hit the player\n hit = pygame.sprite.collide_rect(self, self.player)\n if hit:\n # We did hit the player. Shove the player around and\n # assume he/she won't hit anything else.\n \n # If we are moving right, set our right side\n # to the left side of the item we hit\n if self.change_x < 0:\n self.player.rect.right = self.rect.left\n else:\n # Otherwise if we are moving left, do the opposite.\n self.player.rect.left = self.rect.right\n \n # Move up/down\n self.rect.y += self.change_y\n \n # Check and see if we the player\n hit = pygame.sprite.collide_rect(self, self.player)\n if hit:\n # We did hit the player. 
Shove the player around and\n # assume he/she won't hit anything else.\n \n # Reset our position based on the top/bottom of the object.\n if self.change_y < 0:\n self.player.rect.bottom = self.rect.top\n else:\n self.player.rect.top = self.rect.bottom\n\n for bullet in self.level.bullet_list:\n if pygame.sprite.collide_rect(self, bullet) and bullet.heroBullet == True:\n self.level.bullet_list.remove(bullet)\n self.level.active_sprite_list.remove(bullet)\n \n # Check the boundaries and see if we need to reverse\n # direction.\n if self.rect.bottom > self.boundary_bottom or self.rect.top < self.boundary_top:\n self.change_y *= -1\n \n cur_pos = self.rect.x - self.level.world_shift\n if cur_pos < self.boundary_left or cur_pos > self.boundary_right:\n self.change_x *= -1", "def update(self):\n \n # Move left/right\n self.rect.x += self.change_x\n \n # See if we hit the player\n hit = pygame.sprite.collide_rect(self, self.player)\n if hit:\n self.level.game_over()\n\n for bullet in self.level.bullet_list:\n if pygame.sprite.collide_rect(self, bullet) and bullet.heroBullet == True:\n if self.lives < 2:\n self.level.enemy_list.remove(self)\n self.level.active_sprite_list.remove(self)\n if self.gameEnder == True:\n self.level.you_win()\n self.level.bullet_list.remove(bullet)\n self.level.active_sprite_list.remove(bullet)\n self.lives -= 1\n \n if (self.shooter == True):\n x = randint(0,45)\n\n if x == 1:\n bullet = Bullet(self, False, True, self.demon)\n self.level.active_sprite_list.add(bullet)\n self.level.bullet_list.add(bullet)\n\n # Move up/down\n self.rect.y += self.change_y\n \n # Check the boundaries and see if we need to reverse\n # direction.\n if self.rect.bottom > self.boundary_bottom or self.rect.top < self.boundary_top:\n self.change_y *= -1\n \n cur_pos = self.rect.x - self.level.world_shift\n if cur_pos < self.boundary_left: \n self.change_x *= -1\n frame = (cur_pos // 30) % len(self.walking_frames_r)\n self.image = self.walking_frames_r[frame]\n\n if cur_pos > self.boundary_right:\n frame = (cur_pos // 30) % len(self.walking_frames_l)\n self.image = self.walking_frames_l[frame]\n self.change_x *= -1", "def update(self):\r\n\r\n # Two states, either target is shot or its moving\r\n if self.state == 'shot':\r\n self.hold -= 1\r\n self.z -= 0.03\r\n self.y -= 5\r\n if self.z < 0:\r\n self.z = 0\r\n self.opacity -= 4 if self.opacity > 0 else 0\r\n self.sprite.set_position(self.x, self.y)\r\n self.sprite.update(scale = self.z, rotation = int((self.y % 356)))\r\n self.sprite.opacity = self.opacity\r\n if self.hold <= 0:\r\n self.sprite.delete()\r\n elif self.state == 'moving':\r\n self.x += (self.speed * self.x_direction)\r\n self.y += (self.speed * self.y_direction)\r\n\r\n self.sprite.set_position(self.x, self.y)\r\n\r\n\r\n if int(t.time()) - self.last_change_x_time > 1:\r\n if random.random() <= self.prob_change_x:\r\n self.x_direction *= -1\r\n self.last_change_x_time = int(t.time())\r\n\r\n if int(t.time()) - self.last_change_y_time > 1:\r\n if random.random() <= self.prob_change_y:\r\n self.y_direction *= -1\r\n self.last_change_y_time = int(t.time())\r\n\r\n if self.x < self.anchor_x:\r\n print(f'set to {self.anchor_x}')\r\n self.x = self.anchor_x\r\n self.x_direction *= -1\r\n if (self.x + self.anchor_x) > (config.window_width):\r\n self.x = (config.window_width-self.anchor_x)\r\n self.x_direction *= -1\r\n\r\n if self.y < self.anchor_y:\r\n self.y = self.anchor_y\r\n self.y_direction *= -1\r\n if (self.y + self.anchor_y) > (config.window_height):\r\n self.y = 
(config.window_height-self.anchor_y)\r\n self.y_direction *= -1\r\n\r\n if self.x_direction == 1:\r\n self.sprite.update(rotation = 2*self.speed)\r\n elif self.x_direction == -1:\r\n self.sprite.update(rotation = -(2*self.speed))", "def update(self):\n if self.is_moving_up:\n self.dirty = 1\n if self.is_moving_down:\n self.dirty = 1\n if self.is_moving_right:\n self.dirty = 1\n if self.is_moving_left:\n self.dirty = 1\n \n self.rect.x += self.moveX\n self.logic.wall_hit_logic(self.moveX, \"x\", self.room.wall_list)\n self.room_change.change_room()\n \n self.rect.y += self.moveY\n self.logic.wall_hit_logic(self.moveY, \"y\", self.room.wall_list)\n self.room_change.change_room()", "def move(self):\r\n\r\n # Randomizes movement after 40 steps and flips sprite \\\r\n # (if x-value of speed variable changes from positive to negative)\r\n if step == 40 and 0 < hunger < 205 and thirst < 175 and self.speed[0] not in range(-1000, 0):\r\n self.speed[0] = random.randint(-5, -1)\r\n self.speed[1] = random.randint(-7, 7)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n\r\n # Randomizes movement after 40 steps, but doesn't flip sprite because \\\r\n # x-value of speed variable doesn't change from positive to negative\r\n elif step == 40 and 0 < hunger < 205 and thirst < 175 and self.speed[0] in range(-1000, 0):\r\n self.speed[0] = random.randint(-5, -1)\r\n self.speed[1] = random.randint(-7, 7)\r\n\r\n # Randomizes movement after 80 steps and flips sprite \\\r\n # (if x-value of speed variable changes from negative to positive)\r\n if step == 80 and 0 < hunger < 205 and thirst < 175 and self.speed[0] not in range(0, 1000):\r\n self.speed[0] = random.randint(1, 5)\r\n self.speed[1] = random.randint(-7, 7)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n\r\n # Randomizes movement after 80 steps, but doesn't flip sprite \\\r\n # because x-value of speed variable doesn't change from positive to negative\r\n elif step == 80 and 0 < hunger < 205 and thirst < 175 and self.speed[0] in range(0, 1000):\r\n self.speed[0] = random.randint(1, 5)\r\n self.speed[1] = random.randint(-7, 7)\r\n\r\n # Flips the dino sprite when it hits the left or right side of the enclosure \\\r\n # and reverses dino's speed\r\n if self.rect.right > 818 or self.rect.left < 182:\r\n # Keeps sprite from getting stuck on wall in an endless cycle of flipping\r\n if step != 40 and step != 80 and 0 < hunger < 205 and thirst < 175:\r\n self.speed[0] = - self.speed[0]\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n\r\n # Reverses the dino's speed if it hits the top or bottom side of the enclosure\r\n if self.rect.top < 55 or self.rect.bottom > 542:\r\n # Keeps sprite from getting stuck on wall in an endless cycle of flipping\r\n if step != 40 and step != 80 and 0 < hunger < 205 and thirst < 175:\r\n self.speed[1] = - self.speed[1]\r\n\r\n # Causes dinosaur to go to the tree when hunger is high enough\r\n if hunger >= 205:\r\n if step != 40 and step != 80 and 0 < thirst < 175:\r\n if self.rect.left > 300 and self.speed[0] not in range(-1000, 0):\r\n # Speed must be rounded so that speed[0] and speed[1] is in the range functions above \\\r\n # (range function doesn't take decimal point numbers)\r\n self.speed[0] = round((300 - self.rect.left)/30)\r\n self.speed[1] = round((340 - self.rect.top)/30)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n elif self.rect.left > 300 and self.speed[0] in range(-1000, 0):\r\n self.speed[0] = round((300 - self.rect.left)/30)\r\n self.speed[1] = round((340 - 
self.rect.top)/30)\r\n if self.rect.left < 300 and self.speed[0] not in range(1, 1000):\r\n self.speed[0] = round((300 - self.rect.left)/30)\r\n self.speed[1] = round((340 - self.rect.top)/30)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n elif self.rect.left < 300 and self.speed[0] in range(1, 1000):\r\n self.speed[0] = round((300 - self.rect.left)/30)\r\n self.speed[1] = round((340 - self.rect.top)/30)\r\n\r\n # Causes dinosaur to go to the pond when thirst is high enough\r\n if thirst == 175:\r\n if step != 40 and step != 80:\r\n if self.rect.left > 540 and self.speed[0] not in range(-1000, 0):\r\n self.speed[0] = round((540 - self.rect.left)/30)\r\n self.speed[1] = round((120 - self.rect.top)/30)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n elif self.rect.left > 540 and self.speed[0] in range(-1000, 0):\r\n self.speed[0] = round((540 - self.rect.left)/30)\r\n self.speed[1] = round((120 - self.rect.top)/30)\r\n if self.rect.left < 540 and self.speed[0] not in range(1, 1000):\r\n self.speed[0] = round((540 - self.rect.left)/30)\r\n self.speed[1] = round((120 - self.rect.top)/30)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n elif self.rect.left < 540 and self.speed[0] in range(1, 1000):\r\n self.speed[0] = round((540 - self.rect.left)/30)\r\n self.speed[1] = round((120 - self.rect.top)/30)\r\n\r\n # Sets rectangle surrounding dino sprite to new position based on its speed\r\n newpos = self.rect.move(self.speed)\r\n self.rect = newpos", "def move(self, walls):\n \n # Move left/right\n self.rect.x += self.change_x\n \n # Did this update cause us to hit a wall?\n block_hit_list = pygame.sprite.spritecollide(self, walls, False)\n for block in block_hit_list:\n # If we are moving right, set our right side to the left side of\n # the item we hit\n if self.change_x > 0:\n self.rect.right = block.rect.left\n else:\n # Otherwise if we are moving left, do the opposite.\n self.rect.left = block.rect.right\n \n # Move up/down\n self.rect.y += self.change_y\n \n # Check and see if we hit anything\n block_hit_list = pygame.sprite.spritecollide(self, walls, False)\n for block in block_hit_list:\n \n # Reset our position based on the top/bottom of the object.\n if self.change_y > 0:\n self.rect.bottom = block.rect.top\n else:\n self.rect.top = block.rect.bottom", "def updateAllSpritesPositions(self):\n\n # aggiorno posizione del player\n self.player.update()\n\n # aggiorno posizione dei nemici\n self.enemies.update()\n\n # aggiorno posizione delle bombe\n self.bombs.update()\n\n # aggiorno posizione delle wall bombs\n self.wallBombs.update()\n\n # aggiorno posizione dei ricaricatori del tempo\n self.timeReloaders.update()\n\n # aggiorno posizione dei killer dei nemici\n self.enemyKillers.update()\n\n # aggiorno la posizione dei greedy enemies\n self.greedyEnemies.update()\n\n # aggiorno la posizione di tutti i portali\n self.portals.update()\n\n # aggiorno la posizione di tutti gli invisible players\n self.invisibilityPlayers.update()\n\n # aggiorno la posizione di tutti i proiettili insieme al bonus\n self.playerBullets.update()\n self.bonusPlayerBullets.update()\n\n # aggiorno la posizione dei proiettili degli nemici che sparano\n #self.shooters.update()\n self.shooterBullets.update()", "def update(self):\n # Update the mario's x, y values, not the rect.\n if self.moving_right and self.rect.right < self.screen_rect.right:\n self._move_right()\n if self.moving_left and self.rect.left > 0:\n self._move_left()\n if self.jumping > 0:\n self._perform_jump()\n if 
self.jumping <= 0:\n self._perform_landing()\n\n # Update rect object from self.x.\n self.rect.x = self.x\n self.rect.y = self.y", "def update(self, delta_time):\n\n #scene_dico = self.scene.__dict__\n\n #print(\"\\n\\n *******\")\n\n #for k, v in scene_dico.items():\n # print(k,\" \",v)\n # print(\"------------\")\n \n\n #self.scene[sprite_lists].update()\n\n #print(type(self.scene))\n #self.scene.sprite_lists.update()\n\n for spl in self.scene.sprite_lists:\n spl.update()\n\n \n\n self.frame_count += 1\n self.player_list.update()\n\n for i in range (self.lives):\n \n\n\n self.life_list[i].center_x = (self.player_sprite.center_x - SCREEN_WIDTH // 2) + i * self.life_list[i].width\n self.life_list[i].center_y = (self.player_sprite.center_y - SCREEN_HEIGHT // 2) \n\n\n self.crosshair_sprite.center_x = self.player_sprite.center_x + self.crosshair_relative_xoffset\n self.crosshair_sprite.center_y = self.player_sprite.center_y + self.crosshair_relative_yoffset\n\n \n\n\n\n \n\n \n\n self.enemy_list.update()\n \n\n # Move the player with the physics engine\n #self.physics_engine_walls.update()\n #self.stairs_list.update()\n\n\n\n \n\n\n\n\n\n #for medikit in ammo_medikit_hit_list:\n # medikit.remove_from_sprite_lists()\n \n # self.player_sprite.cur_health += MEDIKIT_HEALTH_BOOST\n\n\n\n\n\n # Generate a list of all sprites that collided with the player.\n #stairs_hit_list = arcade.check_for_collision_with_list(self.player_sprite,\n # self.stairs_list)\n\n #for stairs in stairs_hit_list:\n # self.level += 1\n # self.is_smoked = False\n # Load the next level\n # self.setup(self.level) # .............?????????.........\n\n # Set the camera to the start\n # self.view_left = 0\n # self.view_bottom = 0\n # changed_viewport = True\n\n\n\n \n\n\n # Loop through each enemy that we have\n for enemy in self.enemy_list:\n\n \n start_x = enemy.center_x\n start_y = enemy.center_y\n\n \n dest_x = self.player_sprite.center_x\n dest_y = self.player_sprite.center_y\n \n x_diff = dest_x - start_x\n y_diff = dest_y - start_y\n angle = math.atan2(y_diff, x_diff)\n\n # Set the enemy to face the player.\n enemy.angle = math.degrees(angle)-90\n\n # Shoot every 60 frames change of shooting each frame\n if self.frame_count % 60 == 0:\n bullet = arcade.Sprite(\":resources:images/space_shooter/laserBlue01.png\") \n \n bullet.center_x = start_x\n bullet.center_y = start_y\n\n # Angle the bullet sprite\n bullet.angle = math.degrees(angle)\n\n # Taking into account the angle, calculate our change_x\n # and change_y. 
Velocity is how fast the bullet travels.\n bullet.change_x = math.cos(angle) * BULLET_SPEED\n bullet.change_y = math.sin(angle) * BULLET_SPEED\n\n #self.bullet_list.append(bullet) -------------------------\n self.terro_bullet_list.append(bullet)\n\n\n \n\n\n\n # --- Manage Scrolling ---\n\n # Scroll left\n\n \"\"\"\n left_boundary = self.view_left + LEFT_VIEWPORT_MARGIN\n if self.player_sprite.left < left_boundary:\n self.view_left -= left_boundary - self.player_sprite.left\n changed_viewport = True\n\n # Scroll right\n right_boundary = self.view_left + SCREEN_WIDTH - RIGHT_VIEWPORT_MARGIN\n if self.player_sprite.right > right_boundary:\n self.view_left += self.player_sprite.right - right_boundary\n changed_viewport = True\n\n # Scroll up\n top_boundary = self.view_bottom + SCREEN_HEIGHT - TOP_VIEWPORT_MARGIN\n if self.player_sprite.top > top_boundary:\n self.view_bottom += self.player_sprite.top - top_boundary\n changed_viewport = True\n\n # Scroll down\n bottom_boundary = self.view_bottom + BOTTOM_VIEWPORT_MARGIN\n if self.player_sprite.bottom < bottom_boundary:\n self.view_bottom -= bottom_boundary - self.player_sprite.bottom\n changed_viewport = True\n\n if changed_viewport:\n # Only scroll to integers. Otherwise we end up with pixels that\n # don't line up on the screen\n self.view_bottom = int(self.view_bottom)\n self.view_left = int(self.view_left)\n\n # Do the scrolling\n arcade.set_viewport(self.view_left,\n SCREEN_WIDTH + self.view_left,\n self.view_bottom,\n SCREEN_HEIGHT + self.view_bottom)\n\n \"\"\"\n\n\n if self.player_sprite.right >= self.end_of_map:\n if self.level < self.max_level:\n self.level += 1\n self.load_level(self.level)\n self.player_sprite.center_x = 128\n self.player_sprite.center_y = 64\n self.player_sprite.change_x = 0\n self.player_sprite.change_y = 0\n else:\n self.game_over = True\n\n\n\n\n self.pan_camera_to_user()\n\n\n\n # ///////// joy\n\n joy_dico = self.window.joys[0]\n\n BTN_A = joy_dico.button_controls[0]\n BTN_B = joy_dico.button_controls[1]\n BTN_X = joy_dico.button_controls[2]\n BTN_Y = joy_dico.button_controls[3]\n BTN_TL = joy_dico.button_controls[4]\n BTN_TR = joy_dico.button_controls[5]\n BTN_SELECT = joy_dico.button_controls[6]\n BTN_START = joy_dico.button_controls[7]\n BTN_MODE = joy_dico.button_controls[8]\n BTN_THUMBL = joy_dico.button_controls[9]\n BTN_THUMBR = joy_dico.button_controls[10]\n\n\n print(f\"\\n BTN_A ----> {BTN_A}\")\n\n\n BTN_list = [BTN_A,BTN_B,BTN_X,BTN_Y, BTN_TL, BTN_TR, BTN_SELECT, BTN_START, BTN_MODE, BTN_THUMBL, BTN_THUMBR]\n\n BTN_fn_list = [self.joy_A, self.joy_B, self.joy_X, self.joy_Y, self.joy_TL, self.joy_TR, self.joy_SELECT, self.joy_START, self.joy_MODE, self.joy_THUMBL, self.joy_THUMBR]\n\n for BTN in BTN_list:\n if BTN._value == 1:\n print(f\"=====> >=====> ====> {BTN.raw_name}\")\n\n idx = BTN_list.index(BTN)\n\n BTN_fn_list[idx]()", "def move(self):\n self.tick()\n self.pressed = pygame.key.get_pressed()\n\n \"\"\"Do deleting platforms/do losing\"\"\"\n self.player.update(self)\n self.main_platform.update(self)\n\n for i in self.platformsx:\n self.platformsx[i].update(self)\n \n for i in self.platforms:\n self.platforms[i].update(self)\n\n for i in self.to_remove:\n self.platforms.pop(i)\n self.to_remove.clear()\n\n for i in self.platformsx:\n self.platformsx[i].update(self)", "def update(self):\n self.rect.y += self.speedy\n ## kill the sprite after it moves over the top border\n if self.rect.bottom < 0:\n self.kill()\n\n ## now we need a way to shoot\n ## lets bind it to \"spacebar\".\n ## adding an 
event for it in Game loop", "def update(self, seconds):\n # Gravity\n self.calcGravity(seconds)\n \n # Move left/right\n self.rect.x += self.change_x\n \n # See if we hit anything\n block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n for block in block_hit_list:\n # If we are moving right,\n # set our right side to the left side of the item we hit\n if self.change_x > 0:\n self.rect.right = block.rect.left\n elif self.change_x < 0:\n # Otherwise if we are moving left, do the opposite.\n self.rect.left = block.rect.right\n \n # Move up/down\n self.rect.y += self.change_y\n \n # update arm position\n self.arm.update(seconds)\n \n # Check and see if we hit anything\n block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n for block in block_hit_list:\n \n # Reset our position based on the top/bottom of the object.\n if self.change_y > 0:\n self.rect.bottom = block.rect.top\n elif self.change_y < 0:\n self.rect.top = block.rect.bottom\n \n # Stop our vertical movement\n self.change_y = 0", "def update(self):\n events = pygame.event.get()\n self.plane_update()\n self.bullet_update(events)\n self.background_update()\n self.enemy_update(events)", "def update(self):\n self.rect.y += self.speedy\n # kill the sprite after it moves over the top border\n if self.rect.bottom < 0:\n self.kill()\n\n # now we need a way to shoot\n # lets bind it to \"spacebar\".\n # adding an event for it in Game loop", "def update(self):\n # delete sprite if fired\n if not self.player.state == 'USE_A':\n self.game.all_sprites.remove(self)", "def run_game(self):\n while True:\n self._check_events() # check event listener\n \n if self.stats.game_active:\n self.ship.update() # update position\n # self.bullets.update() # will update each sprite in the group\n self._update_bullets() \n self._update_aliens()\n\n# self.laser.update()\n self._update_screen() # refresh screen", "def update(self) -> None:\n self.all_sprites.update()", "def update(self):\n self.platform_list.update()\n #self.enemy_list.update()\n self.enemy_list.update()\n self.bullet_list.update()\n self.active_sprite_list.update()", "def update(self):\r\n if self.move_right and self.rect.right < self.screen_rect.right:\r\n self.centerx += self.game_settings.rocket_speed_factor\r\n if self.move_left and self.rect.left > 0:\r\n self.centerx -= self.game_settings.rocket_speed_factor\r\n if self.move_down and self.rect.bottom < self.screen_rect.bottom:\r\n self.centery += self.game_settings.rocket_speed_factor\r\n if self.move_up and self.rect.top > 0:\r\n self.centery -= self.game_settings.rocket_speed_factor\r\n\r\n # Update rect object from self.centerx and self.centery\r\n self.rect.centerx = self.centerx\r\n self.rect.centery = self.centery", "def run_logic(self):\n if not self.game_over:\n # Move spites\n self.all_sprites_list.update()\n\n # Check collisions of player\n block_hit_list = pygame.sprite.spritecollide(self.player, self.block_list, True)\n\n # Check list of collisions\n for block in block_hit_list:\n self.score += 1\n print(self.score)\n # Do something with block?\n if len(self.block_list) == 0:\n self.game_over = True", "def update(self):\n self.rect.y += self.speedy\n ## kill the sprite after it moves over the top border\n if self.rect.top > HEIGHT:\n self.kill()", "def run_logic(self):\r\n \r\n #update the player's position\r\n self.player.update()\r\n\r\n for e in self.elist:\r\n e.Enmove() #move the enemy\r\n \r\n\r\n \r\n #check for collisions with platforms and set player's 
position accordingly\r\n plathits = pygame.sprite.spritecollide(self.player, self.plist, False)\r\n if plathits:\r\n if self.player.vel.y < 0: #If colliding from below, player moves down\r\n self.player.vel.y = -self.player.vel.y\r\n else:\r\n self.player.pos.y = plathits[0].rect.top #player moves to top\r\n self.player.vel.y = 0 #stop player from moving when on platform\r\n self.player.grounded = True #To determine whether or not player can jump \r\n\r\n else:\r\n self.player.grounded = False #player cannot jump\r\n\r\n\r\n #Checking for enemy collision\r\n enhits = pygame.sprite.spritecollide(self.player, self.elist, False)\r\n \r\n #enemy collisions and lives\r\n for e in enhits:\r\n self.player.wait -= 5\r\n if self.player.wait == 0: #\"invincibility\" before life is lost \r\n self.player.hide() #hide player\r\n self.player.lives -= 1\r\n self.player.wait = 90\r\n\r\n\r\n #Checking for coin collisions\r\n coinhits = pygame.sprite.spritecollide(self.player, self.clist, True)\r\n for c in coinhits:\r\n self.player.coincount += 1\r\n #play coin collect music\r\n if self.musicPlaying:\r\n self.pickUpSound.play()\r\n\r\n #checking whether player has collided with next level platform\r\n levhits = pygame.sprite.spritecollide(self.player, self.nlist, False)\r\n if levhits:\r\n self.player.level = 2\r\n\r\n #checking whether player has reached the boss\r\n bossplathits = pygame.sprite.spritecollide(self.player, self.blist, False)\r\n if bossplathits:\r\n self.player.level = 3\r\n\r\n\r\n #scrolling entire screen, player, and enemy upwards\r\n if self.player.rect.top <= WINDOWHEIGHT/4:\r\n self.player.pos.y += abs(self.player.vel.y) #player moves up when reaches 3/4 of screen\r\n #Likewise, all other sprites shift upwards. When a sprite is no longer on\r\n #the screen, it is deleted\r\n for plat in self.plist:\r\n plat.rect.y += abs(self.player.vel.y)\r\n if plat.rect.top >= WINDOWHEIGHT:\r\n plat.kill()\r\n for e in self.elist:\r\n e.rect.y += abs(self.player.vel.y)\r\n if e.rect.top >= WINDOWHEIGHT:\r\n e.kill()\r\n for c in self.clist:\r\n c.rect.y += abs(self.player.vel.y)\r\n if c.rect.top >= WINDOWHEIGHT:\r\n c.kill()\r\n for n in self.nlist:\r\n n.rect.y += abs(self.player.vel.y)\r\n if n.rect.top >= WINDOWHEIGHT:\r\n n.kill()\r\n for b in self.blist:\r\n b.rect.y += abs(self.player.vel.y)\r\n if b.rect.top >= WINDOWHEIGHT:\r\n b.kill()\r\n for end in self.endlist:\r\n end.rect.y += abs(self.player.vel.y)\r\n if end.rect.top >= WINDOWHEIGHT:\r\n end.kill()\r\n\r\n #checking if player has reached final platform. If so,\r\n #game_complete is set to True\r\n endhits = pygame.sprite.spritecollide(self.player, self.endlist, False)\r\n if endhits:\r\n self.game_complete = True\r\n\r\n #game over when all lives are lost or player falls down\r\n if self.player.lives == 0:\r\n self.game_over = True\r\n \r\n elif self.player.rect.top > WINDOWHEIGHT:\r\n self.game_over = True", "def update(self, *args):\n\n\t\t# Update Bullets\n\t\tif self.power == 'bulletup' and self.level >= 2:\n\t\t\tself.angle_bullets(self.level)\n\t\t\n\t\t# Update Lazer\n\t\tif self.power == 'lazerup' and self.level > 0:\n\n\t\t\tself.index += 1\n\t\t\tif self.index % 12:\n\t\t\t\tself.step += 1\n\t\t\t\n\t\t\tself.y -= self.speed\n\n\n\t\t\tself.rect.y = self.y\n\t\t\tself.rect.x = self.x\n\n\t\t\t# print(\"SLOPE??? 
\", self.slope)\n\t\t\tself.sheet.blitme(self.screen, self.step % self.sheet.totalCells, \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.rect.x, self.rect.y)\n\n\t\t# Update Bombs\n\t\tif self.power == 'bombup' and self.level > 0:\n\t\t\tself.bomb_vector()\n\n\t\t# Update Default\n\t\telse:\n\t\t\tself.y -= self.speed\n\t\t\tself.rect.y = self.y\n\n\t\tpygame.display.flip()", "def update(self):\n if self.moving_right and self.rect.right < self.screen_rect.right:\n self.center += self.ai_settings.ship_speed_factor\n elif self.moving_left and self.rect.left > 0:\n self.center -= self.ai_settings.ship_speed_factor\n elif self.moving_up and self.rect.top > self.screen_rect.top:\n self.bottom -= self.ai_settings.ship_speed_factor\n elif self.moving_down and self.rect.bottom < self.screen_rect.bottom:\n self.bottom += self.ai_settings.ship_speed_factor\n\n # update rect object from self.center.\n self.rect.centerx = self.center\n self.rect.bottom = self.bottom", "def tick():\n move_balls(targets_speed)\n move_super_balls(targets_speed * 2)", "def update(self):\n # clear surface with clean background\n self.surface.blit(self.background_image, self.background_image.get_rect())\n self.speed_multiplier_multiplier += 0.0001 # gradually increase speed\n\n # handle all the events from this tick\n for event in pygame.event.get():\n if event.type == pygame.QUIT: #The user closed the window!\n stop()\n if event.type == pygame.KEYDOWN:\n # key was pressed down on this tick\n if event.key in [pygame.K_ESCAPE]:\n # the player has paused the game. Enter pause menu\n menu.game_state = 'in menu'\n menu.state = 'paused'\n elif event.key == pygame.K_SPACE:\n if self.fuel_bar.fuel_amount > 1:\n self.player.shoot()\n elif event.type == GENERATE_OBSTACLE:\n __asteroid = asteroids.Asteroid(resolution) # create a temporary pointer to new asteroid\n # add new asteroid to sprite lists for updating an rendering\n self.asteroids.add(__asteroid)\n self.sprites.add(__asteroid)\n elif event.type == GENERATE_FUEL:\n __fuel_can = FuelBarrel()\n self.fuel_cans.add(__fuel_can)\n self.sprites.add(__fuel_can)\n\n self.pressed_keys = pygame.key.get_pressed() # get a list of all the keys currently held down\n\n # if any valid keys for speed increase are pressed\n if self.pressed_keys[pygame.K_UP] or self.pressed_keys[pygame.K_w] or self.pressed_keys[pygame.KMOD_SHIFT]:\n if self.fuel_bar.fuel_amount > 1:\n self.speed_multiplier = 1.8 # the asteroids should move slightly faster\n else:\n self.speed_multiplier = 1\n else:\n self.speed_multiplier = 1\n\n # update all the sprites and give them the speed multiplier\n # multiply speed multiplier again to increase speed as the game continues\n self.sprites.update(self.speed_multiplier*self.speed_multiplier_multiplier)\n # test for collisions between bullets and asteroids, delete colliding sprites\n pygame.sprite.groupcollide(self.asteroids, self.bullets, True, True, pygame.sprite.collide_mask)\n\n self.sprites.draw(self.surface) # draw all game sprites to game surface\n self.top_layer.draw(self.surface)\n\n self.__collision_holder = check_collisions(self.player, self.fuel_cans)\n if self.__collision_holder:\n for can in self.__collision_holder:\n can.kill()\n self.fuel_bar.modify(1)\n\n # if player is colliding with an asteroid they have lost the round, go to death menu\n if check_collisions(self.player, self.asteroids):\n menu.game_state = 'in menu'\n menu.state = 'death'", "def update(self):\n self.platform_list.update()\n self.exit_sprite.update()\n self.bagGroup.update()\n 
self.enemy_list.update()", "def update(self):\n self.rect.y += self.speedy\n # kill the sprite after it moves over the top border\n if self.rect.top > HEIGHT:\n self.kill()", "def update(self):\n\t\tif self.moving_right and self.rect.right < self.screen_rect.right:\n\t\t\tself.centerx += self.settings.hero_speed_factor\n\t\tif self.moving_left and self.rect.left > 0:\n\t\t\tself.centerx -= self.settings.hero_speed_factor\n\t\tif self.moving_up and self.rect.top > 0:\n\t\t\tself.centery -= self.settings.hero_speed_factor\n\t\tif self.moving_down and self.rect.bottom < self.screen_rect.bottom:\n\t\t\tself.centery += self.settings.hero_speed_factor\n\t\t\t\t\t\n\t\t# Update our hero's rect object's x and y\n\t\tself.rect.centerx = self.centerx\n\t\tself.rect.centery = self.centery", "def update(self):\n frame = str(self.image_number//10)\n if self.image_number < 30: # Not yet on the tenth frame\n self.image_number += 1\n else: # Reset back to 0\n self.image_number = 0\n\n image_location = os.path.join(\"assets\", \"player\" + frame + \".png\") # Get image path\n self.image = pygame.image.load(image_location).convert_alpha() # Load image\n\n # Keyboard events\n keys_pressed = pygame.key.get_pressed()\n if keys_pressed[pygame.K_UP]:\n self.move(0, -5)\n if keys_pressed[pygame.K_LEFT]:\n self.move(-5, 0)\n if keys_pressed[pygame.K_RIGHT]:\n self.move(5, 0)\n if keys_pressed[pygame.K_DOWN]:\n self.move(0, 5)\n\n # Mouse events\n mouse_pos = pygame.mouse.get_pos() # Get position of mouse as a tuple representing the\n # (x, y) coordinate\n\n mouse_buttons = pygame.mouse.get_pressed()\n if mouse_buttons[0]: # If left mouse pressed\n self.teleport(mouse_pos[0], mouse_pos[1])\n if mouse_buttons[2]: # If right mouse pressed\n self.teleport(mouse_pos[0], mouse_pos[1])", "def update(self):\n\n # Track FPS count\n if self.fps_counter + 1 >= 60:\n self.fps_counter = 0\n\n self.fps_counter += 1\n\n # Update movement animation and position\n if self.moving_right:\n self.image = self.animated_right[self.fps_counter // 30]\n self.x += self.movement_speed\n\n if self.moving_left:\n self.image = self.animated_left[self.fps_counter // 30]\n self.x -= self.movement_speed\n\n if self.moving_up:\n self.image = self.animated_up[self.fps_counter // 30]\n self.y -= self.movement_speed\n\n if self.moving_down:\n self.image = self.animated_down[self.fps_counter // 30]\n self.y += self.movement_speed\n\n self.rect.x, self.rect.y = self.x, self.y", "def deflector(screen, player):\n\n #Iterate through the enemies\n for sprite in screen.enemies:\n\n #Move the enemy back\n sprite.move(0, -10)\n\n #Iterate through the other sprites\n for sprite in screen.other_enemies:\n\n #If it is the mothership ignore it\n if type(sprite) == Mothership:\n continue\n\n #Otherwise move the sprite back\n sprite.move(0, -10)", "def draw(self):\n\n # I reset it at 24 because they're 4 images and I want the reduce the animation speed by 6 (6*4=24)\n if self.spriteCount + 1 >= 24:\n self.spriteCount = 0\n if self.isJump:\n self.screen.blit(self.spriteJump[self.spriteCount // 6], (self.x_pos, self.y_pos))\n else:\n self.screen.blit(self.spriteFall[self.spriteCount // 6], (self.x_pos, self.y_pos))\n self.spriteCount += 1", "def __init__(self, position, direction):\n super().__init__() # initialize sprite parent class\n self.position = list(position) # make a copy of the given position\n # load own images, each is slightly different for animation\n self.images = [\n pygame.image.load('images/bullets/0.png').convert_alpha(),\n 
pygame.image.load('images/bullets/1.png').convert_alpha(),\n pygame.image.load('images/bullets/2.png').convert_alpha()\n ]\n # counter to keep track of when to switch images\n # <10 img 0\n # >=10<20 img 1\n # >=20<30 img 2\n self.image_counter = 0\n self.image = self.images[self.image_counter]\n self.rect = self.image.get_rect(center=self.position)\n # all the images have a very similar mask so only one needs to be generated\n self.mask = pygame.mask.from_surface(pygame.image.load('images/bullets/mask.png').convert_alpha())\n\n self.direction = direction\n self.speed = 25\n self.move_amount = (0, 0) # amount to move this tick", "def update(self):\n self.counter += 1\n \n if self.counter == 20: \n if self.image == self.sprite1:\n self.image = self.sprite2 \n else:\n self.image = self.sprite1\n self.counter = 0\n\n if self.rect.right >= self.end_right:\n self.rect.centery += self.__dy\n self.__dx = -self.__dx\n if self.rect.left <= self.end_left:\n self.rect.centery += self.__dy\n self.__dx = -self.__dx\n\n self.rect.centerx += self.__dx", "def update(self):\n\n # calculate bullet velocity(movement) from ship to cursor position, where aimed at\n self._calc_bullet_velocity()\n\n # update the decimal position of the bullet. | maybe add the additional feature of adding __speed ontop of velocity too.\n if self.__ship_x < self.__cursor_x: # if ship is to the left of cursor on the screen\n self.__x += self.__x_velocity * self.__speed \n elif self.__ship_x > self.__cursor_x: # if ship is to the right of cursor on the screen\n self.__x -= self.__x_velocity * self.__speed\n\n if self.__ship_y < self.__cursor_y: # if ship is above cursor on the screen\n self.__y += self.__y_velocity * self.__speed\n else:\n self.__y -= self.__y_velocity * self.__speed\n\n # update the rect position.\n self.rect.y = self.__y\n self.rect.x = self.__x", "def update(self, screen_rect):\n if self.actions == Action.actor_idle:\n if self.current_sprite_list != self.sprites[Action.idle]:\n self.current_sprite_list = self.sprites_from_action(Action.idle)\n self.update_frames = Fighter.sprite_update_frames[Action.idle]\n self.updated_sprite_list = True\n else:\n last_action = self.actions[-1]\n action_sprites = self.sprites_from_action(last_action)\n if self.current_sprite_list != action_sprites:\n self.current_sprite_list = action_sprites\n self.updated_sprite_list = True\n\n if Action.jump_charge in self.actions:\n if self.jump_charge < self.jump_max_charge:\n self.jump_charge += 1\n\n if Action.charge in self.actions:\n if self.attack_charge < self.attack_max_charge:\n self.attack_charge += .2\n\n if Action.jumping not in self.actions and not self.gg:\n self.check_gravity(screen_rect.height)\n self.update_screen_loc()\n self.update_image()", "def update(self):\n # self.rect.centerx can only hold integers, so to use speed factor \n # we have to update centerx in a roundabout way:\n # pass float centers to self.center then assign self.center to self.rect.centerx\n \n cond_move_right = self.moving_right and self.rect.right < self.screen_rect.right\n cond_move_left = self.moving_left and self.rect.left > self.screen_rect.left\n \n self.center += 1 * cond_move_right * self.ai_settings.ship_speed_factor \\\n -1 * cond_move_left * self.ai_settings.ship_speed_factor\n \n self.rect.centerx = self.center", "def update(self, dt):\n # First, step time forward.\n self.player.lastfired += dt\n self.hostile.lastfired += dt\n self.accelerate_hero(dt)\n if self.hostile in self.collidables:\n self.accelerate_hostile(dt)\n\n # Next, move the 
objects around the screen\n self.player.move(windowsize=Window.size)\n self.hostile.move(windowsize=Window.size)\n for asteroid in self.asteroids:\n asteroid.move(windowsize=Window.size)\n for shell in self.player.shells:\n shell.move(windowsize=Window.size)\n if shell.offscreen:\n self.player.shells.remove(shell)\n self.remove_widget(shell)\n for shell in self.hostile.shells:\n shell.move(windowsize=Window.size)\n if shell.offscreen:\n self.hostile.shells.remove(shell)\n self.remove_widget(shell)\n\n # Finally, check for any collisions\n self.detect_collisions(dt)", "def update(self):\n\n screen_rect = self.screen.get_rect()\n if self.rect.right >= screen_rect.right:\n pygame.mixer.music.stop()\n self.dead = True\n\n if self.toggle_death:\n if self.cur_frame_score == 0.0:\n score_val = int(self.ai_settings.alien_points * 10.0)\n\n score_str = \"{:,}\".format(score_val)\n self.score_image = self.font.render(score_str, True, self.text_color)\n\n # Display the score at the top right of the screen.\n self.score_rect = self.score_image.get_rect()\n self.score_rect.centerx = self.rect.centerx\n self.score_rect.centery = self.rect.centery\n\n self.cur_frame_score += 1.0\n\n if self.cur_frame_score == self.max_frame_score:\n self.dead = True\n else:\n # Update the sprite animation.\n speed_increase = 2.0 + self.ai_settings.alien_speed_factor\n\n if speed_increase > 8.0:\n speed_increase = 8.0\n\n self.cur_frame += speed_increase\n\n self.x += speed_increase\n self.rect.x = self.x\n\n while self.cur_frame > self.max_frames:\n self.cur_frame -= self.max_frames\n\n if self.cur_frame < 40.0:\n self.image = self.image1\n elif self.cur_frame >= 40.0:\n self.image = self.image2", "def update(self):\r\n self.frame_cnt += 1\r\n\r\n if self.frame_cnt % CannonBall.FRAMES_PER_STEP == 0:\r\n self.x += self.d_x\r\n self.y += self.d_y\r\n self.rect = self.image.get_rect().move(self.x, self.y)\r\n self.steps_taken += 1\r\n\r\n if self.steps_taken >= CannonBall.STEPS_TO_DEST:\r\n self.kill()", "def draw(self):\n \n # Draw the background\n self.world.fill(BLUE)\n \n # Draw all the sprite lists that we have\n self.wall_list.draw(self.world)\n self.enemy_list.draw(self.world)\n self.sludge.draw(self.world)\n self.consumeable.draw(self.world)\n self.can_climb.draw(self.world)", "def move(self):\n\n # Move.\n old_position = self.position\n self.position += self.velocity + 0.5 * self.acceleration\n self.rect.x = round(self.position.x)\n self.rect.y = round(self.position.y)\n self.ground_detector.update(self.rect)\n\n # Handle collisions.\n # Complexity is constant, because the character is allowed to\n # have only 8 neighboring sprites being platform or bridge.\n # In practise there is up to 3 (or 4) collisions possible.\n collisions = (\n pygame.sprite.spritecollide(self, self.world.floors, False) +\n pygame.sprite.spritecollide(self, self.world.corners, False) +\n pygame.sprite.spritecollide(self, self.world.bridges, False)\n )\n \n ground_collisions = self.ground_detector.ground_sprites()\n\n for collision in collisions:\n if (collision in ground_collisions and\n old_position.y + CELL_SIZE <= collision.rect.top):\n self.rect.bottom = collision.rect.top\n self.velocity.y = 0\n\n # Check collisions with walls - horizontal ones\n collisions = (\n pygame.sprite.spritecollide(self, self.world.walls, False) +\n pygame.sprite.spritecollide(self, self.world.corners, False)\n )\n\n for collision in collisions:\n if (collision.rect.centerx > self.rect.centerx):\n self.rect.right = collision.rect.left\n self.velocity.x = 0\n 
else:\n self.rect.left = collision.rect.right\n self.velocity.x = 0\n\n # Update position after collisions.\n self.position.x = self.rect.x\n self.position.y = self.rect.y\n self.ground_detector.update(self.rect)\n\n # Clear the acceleration to avoid jiggling on the floor (it simulates\n # reaction force of the ground)- the 'update' method will take care\n # of gravity.\n self.acceleration = bs.Point(0, 0)", "def setup(self):\n\n # Sprite lists\n self.all_sprites_list = arcade.SpriteList()\n self.wall_list = arcade.SpriteList()\n self.coin_list = arcade.SpriteList()\n self.ghost_list = arcade.SpriteList()\n\n # Set up the player\n self.score = 0\n self.coins_left = 25\n self.player_sprite = arcade.Sprite(\"pumpkin.png\", SPRITE_SCALING)\n self.player_sprite.center_x = 50\n self.player_sprite.center_y = 64\n\n\n self.boo_sprite = arcade.Sprite(\"boo.png\", 1)\n self.boo_sprite.set_position(500,500)\n\n self.game_over_sprite = arcade.Sprite(\"game_over.png\",1)\n self.game_over_sprite.set_position(500,500)\n\n \n\n#MAPPING START ###################################################\n\n\n mapArray = []\n\n mapFile = open(\"map.txt\",\"r\")\n\n content = mapFile.readline()\n\n line = 1\n\n while content:\n\n mapArray.append(content)\n\n content = mapFile.readline()\n\n \"\"\" SET UP THE MAIN MAP FILE \"\"\"\n MapFinal = []\n for row in range(32):\n MapRow = ['']\n for column in range(24):\n MapColumn = ['']\n MapRow.append(MapColumn)\n MapFinal.append(MapRow)\n\n for a in range(32):\n for b in range(24):\n if mapArray[a][b] == \"w\":\n MapFinal[a][b] = \"w\"\n elif mapArray[a][b] == \"t\":\n MapFinal[a][b] = \"t\"\n elif mapArray[a][b] == \"-\":\n MapFinal[a][b] = \"-\"\n\n\n for x in range(32):\n for y in range(24):\n\n if MapFinal[x][y] == 'w':\n x_block, y_block = locator(x,y)\n wall = arcade.Sprite(\"box.png\", BOX_SCALING)\n wall.center_x = x_block\n wall.center_y = y_block\n self.wall_list.append(wall)\n\n ## MAPPING END #############################################\n\n # -- Randomly place coins where there are no walls\n # Create the coins\n for i in range(NUMBER_OF_COINS):\n\n coin = arcade.Sprite(\"apple.png\", APPLE_SCALING)\n\n # --- IMPORTANT PART ---\n\n # Boolean variable if we successfully placed the coin\n coin_placed_successfully = False\n\n # Keep trying until success\n while not coin_placed_successfully:\n # Position the coin\n coin.center_x = random.randrange(SCREEN_WIDTH)\n coin.center_y = random.randrange(SCREEN_HEIGHT)\n\n # See if the coin is hitting a wall\n wall_hit_list = arcade.check_for_collision_with_list(coin, self.wall_list)\n\n # See if the coin is hitting another coin\n coin_hit_list = arcade.check_for_collision_with_list(coin, self.coin_list)\n\n if len(wall_hit_list) == 0 and len(coin_hit_list) == 0:\n # It is!\n coin_placed_successfully = True\n\n # Add the coin to the lists\n self.coin_list.append(coin)\n\n\n #Create the ghosts\n for i in range(NUMBER_OF_GHOSTS):\n\n ghost = arcade.Sprite(\"ghost.png\", GHOST_SCALING)\n ghost_placed_successfully = False\n while not ghost_placed_successfully:\n ghost.center_x = random.randrange(SCREEN_WIDTH)\n ghost.center_y = random.randrange(SCREEN_HEIGHT)\n\n wall_hit_list = arcade.check_for_collision_with_list(ghost, self.wall_list)\n coin_hit_list = arcade.check_for_collision_with_list(ghost, self.coin_list)\n ghost_hit_list = arcade.check_for_collision_with_list(ghost, self.ghost_list)\n player_hit_list = arcade.check_for_collision(ghost, self.player_sprite)\n \n if len(wall_hit_list)==0 and len(coin_hit_list)==0 and 
len(ghost_hit_list)== 0 and (player_hit_list)==0:\n ghost_placed_successfully = True\n\n self.ghost_list.append(ghost)\n\n\n\n # --- END OF IMPORTANT PART ---\n\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.wall_list)\n\n # Set the background color\n arcade.set_background_color(arcade.color.BLACK)", "def update_all_elements(self):\n self.screen.blit(self.background_image, (0, 0))\n self.pad_sprites.draw(self.screen)\n self.obstacle_sprites.draw(self.screen)\n self.meteor_sprites.update()\n self.meteor_sprites.draw(self.screen)\n self.player_sprite.update()\n self.player_sprite.draw(self.screen)\n if not self.lander.is_controllable():\n self.screen.blit(self.alert_instruments, (0, 0))\n self.show_on_screen(\"UNCONTROLLABLE\", (120, 82))\n elif self.lander_failure():\n self.screen.blit(self.alert_instruments, (0, 0))\n self.show_on_screen(\"Failure of \" + str(self.failure), (120, 82))\n else:\n self.screen.blit(self.instruments, (0, 0))\n self.update_lander_meters()", "def update(self):\n\t\tif self.moving_right and self.rect.right < self.screen_rect.right:\n\t\t\tself.center += self.ai_settings.ship_speed_factor\n\t\tif self.moving_left and self.rect.left > 0:\n\t\t\tself.center -= self.ai_settings.ship_speed_factor\t\t\t\n\n\t\tself.rect.centerx = self.center", "def __init__(self, pos):\n\n # initialise parent class\n pygame.sprite.Sprite.__init__(self)\n\n self.x = pos[0]\n self.y = pos[1]\n\n #emergency debug vars\n self.d1 = None\n self.d2 = None\n \n\t # state varibles\n self.idle = True\n self.walk = False\n self.fall = False\n self.climb = False # for ladders and such\n self.climb_mobility = True # moving up and down\n self.action_contact = False # in contact with a switch or such\n self.water_contact = False # in contact with water\n\n # attributes\n self.speed = 3\n self.x_vel = 0\n self.y_vel = 0\n self.jump_power = 10\n self.health = MAX_PLAYER_HEALTH\n self.ammo = 50 # No of fireballs\n self.canShoot = True # can shoot fireball\n\n # item-related attributes\n self.coins = 0\n self.keys = 0\n\n # animation indices\n self.walkPos = 0\n self.walkFact = 8 # every eigth iteration, fetch next image\n self.animSpeed = 2\n\n # special attributes\n self.worldShift = False # shifting the world\n self.canShift = True # atarimae darou\n self.prev_slope = False\n\n self.d1 = self.d2 = 0 # for debugging purposes only\n # extract walking images\n sheet = SpriteSheet(playWalkPath)\n\n # timers for abilities\n self.timer_fire = 0\n self.timer_allstop = 0\n\n self.walk_r_list = list()\n self.walk_l_list = list()\n\n # extract left-facing images\n\n w = 66\n h = 92\n\n self.coords = [(0, 0, w, h), (67, 0, w, h), (134, 0, w, h), \\\n (0, 93, w, h), (67, 93, 64, h), (132, 93, 72, h), \\\n (0, 187, 70, h), (71, 187, 70, h), (142, 187, 70,h), \\\n (0, 279, 70, h), (71, 279, 70, h)]\n\n\n for ind in self.coords:\n image_r = sheet.get_image(*ind).convert() # expand tuple into arugments\n self.walk_r_list.append(image_r)\n image_l = pygame.transform.flip(image_r, True, False)\n self.walk_l_list.append(image_l)\n\n # startup values\n self.rightIdleImg = self.walk_r_list[0]\n self.leftIdleImg = self.walk_l_list[0]\n self.image = self.rightIdleImg\n self.direction = RIGHT\n self.rect = self.image.get_rect(topleft=pos) # set correct position\n\n self.OUTLIST = [False, 0.00] # isShifting, time\n\n self.powerGroup = pygame.sprite.Group()\n self.action_obj = None # stores currently-touching action obj\n\n\n # IGNORABLE VARS\n self.intMousePos = (0, 0)", "def update(self):\r\n if 
self.__var__ == 'right':\r\n self.rect.x -= self.speedy\r\n elif self.__var__ == 'left':\r\n self.rect.x += self.speedy\r\n elif self.__var__ == 'up':\r\n self.rect.y += self.speedy\r\n elif self.__var__ == 'down':\r\n self.rect.y -= self.speedy\r\n if self.rect.x > WIDTH or self.rect.x < 0 or self.rect.y > HEIGHT or self.rect.y < 0:\r\n ammo.remove(self)", "def update_state(self, clock, bg_image, allsprites):\n # ensure we don't get more than 60fps\n clock.tick(60)\n # update the background\n self.screen.blit(bg_image, (0, 0))\n # update the greeting\n self.draw_greeting()\n # update the sprite(s)\n allsprites.update()\n allsprites.draw(self.screen)\n # draw some snow\n for s in self.snow:\n s.draw(self.screen)", "def update(self):\n # get the new position of the snowman\n newpos = self.rect.move((self.move, 0))\n # handle getting to the edges\n if (self.rect.left < self.area.left or\n self.rect.right > self.area.right):\n # move in the opposite direction\n self.move = -self.move\n # get the new position\n newpos = self.rect.move((self.move, 0))\n # mirror the image (flip it)\n self.image = pygame.transform.flip(self.image, 1, 0)\n self.rect = newpos", "def update(self):\n if self.moving_right and self.rect.right < self.screen_rect.right:\n self.center += self.ai_settings.ship_speed_factor\n if self.moving_left and self.rect.left > 0:\n self.center -= self.ai_settings.ship_speed_factor\n\n self.rect.centerx = self.center", "def update(self):\n \n if games.keyboard.is_pressed(games.K_LEFT):\n #check if we reach the edge of the screen, so we do not pass border\n if self.x == 20 or self.x == games.screen.width-20:\n self.x = 40\n else:\n self.x -= 2\n \n if games.keyboard.is_pressed(games.K_RIGHT):\n #check if we reach the edge of the screen, so we do not pass border\n if self.x == 20 or self.x == games.screen.width-20:\n self.x = games.screen.width-40\n else:\n self.x += 2 \n \n if games.keyboard.keypress(games.K_z):\n self.fire_bullet()\n \n self.get_hit()\n \n if self.score.value == 0:\n self.end_game()", "def update(self):\n Enemy.update(self)\n self.update_movement()\n self.update_firing()\n self.surf = self.animation.next_animation()", "def update(self, delta_time):\n if self.current_state == GAME_RUNNING:\n if self.gameover:\n return\n\n self.all_sprites_list.update()\n\n # Game Clock\n self.total_time += delta_time\n\n # flick if it was collision\n if self.collision_time:\n if self.collision_time % 2:\n self.player_sprite.color = arcade.color.AMAZON\n else:\n self.player_sprite.color = arcade.color.WHITE\n\n # Call update on all sprites (The sprites don't do much in this\n # example though.)\n # self.physics_engine.update()\n\n # Generate a list of all enemies that collided with the player.\n ene_hit_list = arcade.check_for_collision_with_list(self.player_sprite,\n self.myobject_list)\n\n # Loop through each colliding sprite, remove it, and add to the score.\n for myobject in ene_hit_list:\n myobject.remove_from_sprite_lists()\n self.numobj -= 1\n self.lives -= 1\n self.collision_time = 50\n self.player_sprite.color = arcade.color.AMAZON\n if (self.numobj < 1):\n self.numobj = STARTING_OBJECTS_COUNT\n self.create_buddies()\n\n if self.lives < 1:\n self.current_state = GAME_OVER\n\n # Generate a list of coins that collided with the player.\n coin_hit_list = arcade.check_for_collision_with_list(self.player_sprite,\n self.coin_list)\n for coin in coin_hit_list:\n coin.remove_from_sprite_lists()\n self.score += 10\n self.ncoins -= 1\n if self.ncoins < 1:\n self.ncoins = COIN_COUNT\n 
self.create_treasure()\n\n # --- Manage Scrolling ---\n\n if self.line_start == SCREEN_HEIGHT // 2:\n self.line_start = 0\n else:\n self.line_start = self.line_start + 1", "def __init__(self, screen, maze_arrangement):\n pygame.sprite.Sprite.__init__(self)\n \n self.__walk_down = [pygame.image.load(\"./PlayerImages/stand_face_down.png\"), \\\n pygame.image.load(\"./PlayerImages/walk1_face_down.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_down.png\"), \\\n pygame.image.load(\"./PlayerImages/walk2_face_down.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_down.png\")]\n \n self.__walk_up = [pygame.image.load(\"./PlayerImages/stand_face_up.png\"), \\\n pygame.image.load(\"./PlayerImages/walk1_face_up.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_up.png\"), \\\n pygame.image.load(\"./PlayerImages/walk2_face_up.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_up.png\")] \n \n self.__walk_right = [pygame.image.load(\"./PlayerImages/stand_face_right.png\"), \\\n pygame.image.load(\"./PlayerImages/walk1_face_right.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_right.png\"), \\\n pygame.image.load(\"./PlayerImages/walk2_face_right.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_right.png\")]\n \n self.__walk_left = [pygame.image.load(\"./PlayerImages/stand_face_left.png\"), \\\n pygame.image.load(\"./PlayerImages/walk1_face_left.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_left.png\"), \\\n pygame.image.load(\"./PlayerImages/walk2_face_left.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_left.png\")]\n \n self.image = self.__walk_down[0]\n self.rect = self.image.get_rect()\n \n # Set direction, current frame index, animation state, and \n self.__direction = \"DOWN\"\n self.__frame_index = 0\n self.__animating = False\n self.__move_length = 0\n \n self.__maze_arrangement = maze_arrangement\n \n self.rect.x = 50\n self.rect.y = 50 \n \n self.__user_x = self.rect.x / 50\n self.__user_y = self.rect.y / 50 \n self.__x = self.rect.x\n self.__y = self.rect.y", "def drawScene(background, backX, mario, marioPic, marioFrame, rectList, breakingBrick, brickPic, coins, moveCoins, coinsPic, mushrooms, itemsPic, enemiesList, enemiesPic, bullets, spriteCount, points, isMuted):\n X, Y, VX, VY, DIR, STATE = 0, 1, 2, 3, 4, 5\n ONGROUND, JUMPFRAMES, INGROUND, ISCROUCH, ONPLATFORM, ISFALLING, ISANIMATING, INVULFRAMES = 0, 1, 2, 3, 4, 5, 6, 7\n BRICKVY, IDLE, TYPE = 4, 5, 6\n ENMYVX, ENMYVY, ENMYIDLE, ENMYINFLOOR = 4, 5, 6, 7\n GUNSTATE, GUNCOUNT, GUNTYPE = 4, 5, 6\n BULLVX, BULLVY = 4, 5\n screen.fill(BLACK) # Clearing screen\n screen.blit(background, (backX, 0)) # Blitting background\n # Blitting moving coins\n for coin in moveCoins: # Going through each coin and defining rects\n coinRect = coin[0], coin[1], coin[2], coin[3]\n screen.blit(coinsPic[1][int(spriteCount // 0.4 % 4)], coinRect)\n # Blitting mushrooms\n for mushroom in mushrooms: # Going through each mushroom and defining rects\n mushRect = Rect(mushroom[0], mushroom[1], mushroom[2], mushroom[3])\n if mushroom[4] == 0: # Checkiong if the moving up animation is done\n screen.blit(itemsPic[0], mushRect)\n # Blitting enemies\n for list in enemiesList: # For each type of enemy in the enemies list\n for enemy in list: # For each individual enemy within that type\n enmyRect = Rect(enemy[0], enemy[1], enemy[2], enemy[3])\n if list == goombas:\n if enemy[ENMYIDLE] == 2: # Checking if enemy is dying\n screen.blit(enemiesPic[0][2], enmyRect)\n else: # Normal animation\n 
screen.blit(enemiesPic[0][int(spriteCount//6)], enmyRect)\n elif list == spinys: # Same thing as goombas except with spinys\n spinePic = enemiesPic[2][int(spriteCount// 2.4 % 2)]\n if enemy[ENMYVX] > 0: # Checking which direction the enemy is moving (1 or -1)\n spinePic = transform.flip(spinePic, True, False)\n screen.blit(spinePic, enmyRect)\n # Blitting bricks and guns\n for list in rectList: # For each type of bricks\n for brick in list: # For each individual brick within that type of brick\n brickRect = Rect(brick[0], brick[1], brick[2], brick[3]) # Defining the rect of that brick\n if list == interactBricks: # Bliting the correct picture if it is an interactBrick\n screen.blit(brickPic[1][0],brickRect)\n elif list == questionBricks: # Doing the same thing but also checking if the brick has been hit or not\n if brick[IDLE] == 1:\n screen.blit(brickPic[1][1], brickRect)\n else:\n screen.blit(brickPic[0][int(spriteCount//2)],brickRect)\n elif list == gunRects: # Bliting the pictures for the bullet bills\n if brick[GUNTYPE] == 1:\n screen.blit(enemiesPic[1][1], (brickRect.x, brickRect.y))\n elif brick[GUNTYPE] == 2:\n screen.blit(enemiesPic[1][2], (brickRect.x, brickRect.y))\n # Blitting brick debris\n for brick in breakingBrick: # For each break in all breakable bricks making the debris fall out in all 4 directions if broken\n screen.blit(brickPiece[0], (brick[0] - brick[5], brick[1]))\n screen.blit(brickPiece[1], (brick[0] + 21 + brick[5], brick[1]))\n screen.blit(brickPiece[2], (brick[0] - brick[5] / 2, brick[1] + 21))\n screen.blit(brickPiece[3], (brick[0] + 21 + brick[5] / 2, brick[1] + 21))\n # Blitting coins\n for coin in coins: # For each coin in the list of all coins\n coinRect = coin[0], coin[1], coin[2], coin[3] # Defining the coins rect\n screen.blit(coinsPic[0][int(spriteCount // 2)], coinRect) # Bliting the coins sprite\n # Blitting bullet bills\n for bullet in bullets: # going through each bullet and defining the bullets rect\n bullRect = Rect(bullet[0], bullet[1], bullet[2], bullet[3])\n bullPic = enemiesPic[1][0]\n if bullet[BULLVX] > 0:\n bullPic = transform.flip(bullPic, True, False)\n screen.blit(bullPic, bullRect)\n # Blitting flag\n screen.blit(flagPic[0],(flagInfo[0][0],flagInfo[0][1])) # Blitting pole\n screen.blit(flagPic[1],(flagInfo[1][0],flagInfo[1][1])) # Blitting flag\n # Blitting mario\n marioShow = marioPic[marioFrame[0]][int(marioFrame[1])]\n if mario[DIR] == \"Left\":\n marioShow = transform.flip(marioShow, True, False) # Flipping mario's sprite if he's facing left\n if marioStats[INVULFRAMES]%2 == 0 or marioStats[ISANIMATING]: # Checking if mario's sprite should be skipped this frame\n screen.blit(marioShow, (mario[0], mario[1])) # Blitting mario's sprite\n # Blitting floating points\n for point in points:\n pointText = marioFontThin.render(\"%s\" %point[3], False, WHITE) # Rendering the text\n screen.blit(pointText, (point[0], point[1]))\n # Blitting mute icon\n if isMuted:\n screen.blit(mutePic, (735,25))", "def move(self, playersList):\n land = pygame.Rect(283, 468, 635, 210)\n leftCroc = pygame.Rect(300, 373, 105, 20)\n leftStand = pygame.Rect(485, 300, 76, 10)\n rightStand = pygame.Rect(639, 300, 76, 10)\n rightBirb = pygame.Rect(795, 373, 105, 20)\n bottomland = pygame.Rect(0, 750, 1500, 1)\n platformsList = [land, leftCroc, rightBirb, leftStand, rightStand,bottomland]\n\n # Push\n if self.count <= 0:\n self.count = 10\n self.push = False\n self.moveRight = False\n\n if self.push == True and self.moveRight:\n self.count -= 1\n self.rect.x 
+=(self.count**2)*0.5\n\n if self.push == True and self.moveRight == False:\n self.count -= 1\n self.rect.x -=(self.count**2)*0.5\n\n # mover horizontal\n if self.left:\n self.rect.x -= self.speed_x\n if self.right:\n self.rect.x += self.speed_x\n\n # Revisar si pegó con la tierra\n if self.rect.colliderect(land) and self.rect.bottom > land.top + 50: # ///// Aquí el land.top+n lo tiene que ajustar para que sea como el piso máximo de los maecitos\n if self.left:\n self.rect.left = land.right\n elif self.right:\n self.rect.right = land.left\n\n # revisar si está en medio de una plataforma (por si está saltando a través de una)\n insidePlatform = self.rect.collidelist(platformsList)\n # mover vertical\n if self.falling or self.rising:\n self.rect.y += self.speed_y\n for platform in platformsList:\n # si choca con una plataforma\n if self.rect.colliderect(bottomland):\n self.rect.bottom = land.top\n self.rect.x = 750\n if self.rect.colliderect(platform):\n # y específicamente está cayéndose y no es la plataforma que ya vimos por la que estaba pasando\n if self.falling and platform is not platformsList[insidePlatform]:\n self.rect.bottom = platform.top\n self.speed_y = 0\n self.falling = False\n\n # Manejar velocidad vertical\n if self.falling or self.rising: # lo vuelvo a revisar porque pudo haber parado de estar falling al caer en una plataforma\n self.speed_y += 1 # /////Aquí se cambia la aceleración vertical del jugador//////\n if self.speed_y > 20:\n self.speed_y = 20 # /////Aquí se cambia la velocidad máxima de caída/////\n if self.rising and self.speed_y > 0: # aquí es si pasa de estar subiendo a estar cayendo\n self.rising = False\n self.falling = True\n\n # si se queda sin plataforma por debajo\n if self.falling == False and self.rising == False:\n self.falling = True\n tryoutRect = self.rect.copy()\n tryoutRect.y += 1\n for platform in platformsList:\n # si choca con una plataforma\n if tryoutRect.colliderect(platform):\n self.falling = False\n\n self.collide(playersList)", "def update_menu(self):\n x_pos = (self.menu_x - self.block_x) // 2 + self.offset_x\n y_pos = self.offset_y + 50 # account for bottom text\n self.menu_sprites[0].image = self.sprite_types[self.curr_sprite]\n for sprite in self.menu_sprites:\n sprite.x = x_pos\n sprite.y = y_pos + sprite.image.height\n sprite.update()\n y_pos += self.offset_y + sprite.image.height", "def move_sprite(self, new_pos):\n self.rect.x = new_pos[1] * SPRITE_SIZE\n self.rect.y = new_pos[0] * SPRITE_SIZE", "def update(self):\n if self.moving_left and self.rect.left > 0:\n self.center -= self.settings.rocket_speed_factor\n if self.moving_right and self.rect.right < self.screen_rect.right:\n self.center += self.settings.rocket_speed_factor\n if self.moving_up and self.rect.top > 0:\n self.bottom -= self.settings.rocket_speed_factor\n if self.moving_down and self.rect.bottom < self.screen_rect.bottom:\n self.bottom += self.settings.rocket_speed_factor\n\n # Update rect object from self.center.\n self.rect.centerx = self.center\n self.rect.bottom = self.bottom", "def update(self):\n\t\tif self.moving_right and self.rect.right < self.screen_rect.right:\n\t\t\tself.x += self.settings_sky.ava_speed\n\t\tif self.moving_left and self.rect.left > 0:\n\t\t\tself.x -= self.settings_sky.ava_speed\n\t\tif self.moving_up and self.rect.top > 0:\n\t\t\tself.y -= self.settings_sky.ava_speed\n\t\tif self.moving_down and self.rect.bottom < self.screen_rect.bottom:\n\t\t\tself.y += self.settings_sky.ava_speed\n\n\t\t#Update rect object from self.x and 
self.y\n\t\tself.rect.x = self.x\n\t\tself.rect.y = self.y", "def _step_sprite(self, sprite, updates_per_env_step=1):\n pass", "def _step_sprite(self, sprite, updates_per_env_step=1):\n if np.isinf(sprite.mass):\n return\n\n position, velocity, entering_intersection = self._get_pos_vel(\n sprite, updates_per_env_step=updates_per_env_step)\n nearest_inds = self._get_nearest_point(position)\n \n # If sprite is entering an intersection or stationary, find the valid\n # directions to move in.\n if entering_intersection:\n valid_directions = self._maze.valid_directions(\n nearest_inds[0], nearest_inds[1])\n self._update_valid_directions(valid_directions, velocity)\n elif np.all(velocity == 0.):\n rounded_position = (\n self._maze.half_grid_side + nearest_inds * self._maze.grid_side)\n on_grid = np.abs(rounded_position - position) < _EPSILON\n if np.all(on_grid):\n valid_directions = self._maze.valid_directions(\n nearest_inds[0], nearest_inds[1])\n else:\n valid_directions = np.zeros((2, 2))\n valid_directions[1 - np.argmax(on_grid)] = 1\n else:\n sprite.velocity = velocity\n return\n \n # Sample new direction to move in\n sample = valid_directions * np.random.rand(2, 2)\n sample_ind = np.argmax(np.ravel(sample))\n\n # Update velocity to move in new direction, but don't eliminate current\n # velocity as that might be needed to get us to the intersection\n velocity[sample_ind // 2] = (\n (1 + _EPSILON) * self._speed * (2 * (sample_ind % 2) - 1))\n sprite.velocity = velocity", "def update(self):\r\n # updates the float position number of the bullet\r\n self.y -= self.settings.bullet_speed\r\n # updates the position of the rectangle\r\n self.rect.y = self.y", "def _update_screen(self):\n self.screen.fill(self.settings.bg_color)\n self.ship.blitme()\n for ufo in self.ufos.sprites():\n ufo.blitme()\n #if self.missile.shooting_missile or self.missile.missile_shot:\n for missile in self.missiles.sprites():\n missile.blitme()\n\n pygame.display.flip()", "def update(self):\r\n \r\n # Some state information for debugging purposes; generally not necessary to print.\r\n #print \"Walking:\", self.state['walking'], \" Running:\", self.state['running'], \" Jumping:\", self.state['jumping']\r\n \r\n # The action currently in process has reached its last sprite image and is about to loop.\r\n # Certain actions require an interruption. 
Those interruptions are placed here.\r\n if self.state['lastisdone']:\r\n # Landing\r\n if self.state['last'] == 'LAND':\r\n # Landing should only loop once, then transition to standing\r\n self.stand()\r\n # Make sure to check whether the character should be walking\r\n if self.state['ldown'] or self.state['rdown']: self.walk()\r\n \r\n # Jumping\r\n elif self.state['last'] == 'JUMP':\r\n # Jumping should loop on the final frame until a predetermined height is reached\r\n if self.y > self.state['floorheightnow'] - self.sprite_height - self.jump_height:\r\n self.actions['JUMP'].curr_step = self.actions['JUMP'].step_duration * (self.actions['JUMP'].num_steps - 1)\r\n # Once the height is reached, transition to falling\r\n else:\r\n self.nojump()\r\n self.fall()\r\n \r\n # Falling\r\n elif self.state['last'] == 'FALL':\r\n # Falling should loop until one of two things happens:\r\n # If the floor has not yet been reached,\r\n groundtest = self.terrain.groundtest(self.pos())\r\n if not groundtest[0]:\r\n # Increase the fall counter\r\n self.state['fallcount'] += 1\r\n # Check to see whether a predetermined number of loops has been reached\r\n # If it has, transition to falling fast\r\n if self.state['fallcount'] > 100:\r\n self.state['fallcount'] = 0\r\n self.nofall()\r\n self.fallfast()\r\n # If the floor has been reached or surpassed, move to floor height and transition to landing\r\n else:\r\n self.y = groundtest[1][1] - self.sprite_height\r\n self.nofall()\r\n self.land()\r\n \r\n # Falling Fast\r\n elif self.state['last'] == 'FALL_FAST':\r\n # If the floor has been reached or surpassed, move to floor height and transition to landing\r\n groundtest = self.terrain.groundtest(self.pos)\r\n if groundtest[0]:\r\n self.y = groundtest[1]-self.sprite_height\r\n self.nofallfast()\r\n self.land()\r\n\r\n # Always reset the 'lastisdone' flag\r\n self.state['lastisdone'] = False\r\n \r\n \r\n \r\n \r\n \r\n \r\n # Default to standing, unless something below changes the state\r\n self.state['action'] = 'STAND'\r\n \r\n # Walking\r\n if self.state['walking']:\r\n # If either shift key is held, set to running\r\n mods = pygame.key.get_mods()\r\n if mods & pygame.KMOD_LSHIFT or mods & pygame.KMOD_RSHIFT:\r\n self.nowalk()\r\n self.run()\r\n # Otherwise, move forward a preset distance and render the WALK action sprite\r\n else:\r\n self.state['action'] = 'WALK'\r\n self.move((self.walk_step if not self.state['left'] else -self.walk_step, 0))\r\n groundtest = self.terrain.groundtest(self.pos())\r\n if groundtest[0]:\r\n self.y = groundtest[1][1]-self.sprite_height\r\n \r\n # Running\r\n if self.state['running']:\r\n # Move forward a preset distance and render the RUN action sprite\r\n self.state['action'] = 'RUN'\r\n self.move(((self.walk_step if not self.state['left'] else -self.walk_step)*self.fast_factor, 0))\r\n groundtest = self.terrain.groundtest(self.pos)\r\n if groundtest[0]:\r\n self.y = groundtest[1]-self.sprite_height\r\n mods = pygame.key.get_mods()\r\n # If shift is no longer being held, make sure the next frame walking happens\r\n if not (mods & pygame.KMOD_LSHIFT or mods & pygame.KMOD_RSHIFT):\r\n self.norun()\r\n self.walk()\r\n \r\n # Jumping\r\n if self.state['jumping']: \r\n # Move up a preset distance and render the JUMP action sprite\r\n self.state['action'] = 'JUMP'\r\n self.move((0,-self.fall_step))\r\n \r\n # Falling\r\n # This action currently happens only as the result of a jump action\r\n if self.state['falling']:\r\n # Move down a preset distance and render the FALL action 
sprite\r\n self.state['action'] = 'FALL'\r\n self.move((0,self.fall_step))\r\n \r\n # Falling Fast\r\n # This action is intended to happen only when the character has been falling for a while\r\n if self.state['fallingfast']:\r\n # Move down a little farther than in a standard falling action and render the FALL_FAST action sprite\r\n self.state['action'] = 'FALL_FAST'\r\n self.move((0,self.fall_step*self.fast_factor))\r\n \r\n # Landing\r\n # This action should only happen after falling\r\n if self.state['landing']:\r\n self.state['action'] = 'LAND'\r\n \r\n if self.actions[self.state['action']].curr_step/self.actions[self.state['action']].step_duration == self.actions[self.state['action']].num_steps - 1:\r\n self.state['lastisdone'] = True\r\n \r\n if self.state['action'] != self.state['last']:\r\n self.actions[self.state['last']].reset()\r\n \r\n self.state['last'] = self.state['action']", "def _move_forward(self):\n\t\tself.x,self.y = Mario._get_coordinates(Board.prev_i,Board.prev_j,Board.prev_k)\n\t\tif(self.y<=798):\n\t\t\tself.y = self.y+1\n\t\t\tif Board.board[self.x][self.y]=='0':\n\t\t\t\tMario.score += 1\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y]='M'\n\n\t\t\telif Board.board[self.x][self.y]=='P':\n\t\t\t\tMario.lives+=1\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y]='M'\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_1-up.wav\"])\n\n\t\t\telif Board.board[self.x][self.y]=='A':\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y]='M'\n\t\t\t\tMario.attack = 1\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_powerup.wav\"])\n\n\t\t\telif Board.board[self.x][self.y]=='@':\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tMario.lives-=1\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_mariodie.wav\"])\n\t\t\t\tif Mario.lives<=0:\n\t\t\t\t\tcall([\"aplay\",\"-q\",\"smb_gameover.wav\"])\n\t\t\t\t\treturn \"exit\"\n\t\t\t\tos.system('clear')\n\t\t\t\tprint(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\t\\t\\t\\t\\t\\tNumber of Mario left\",Mario.lives)\n\t\t\t\tMario.respawn(self.x,self.y)\n\t\t\t\ttime.sleep(2)\n\t\t\t\tinit_board(Board.prev_i,Board.prev_j,Board.prev_k)\n\n\t\t\telif(Board.board[self.x][self.y]=='/'):\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x-1][self.y]='M'\n\n\t\t\telif Board.board[self.x][self.y]=='I':\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_stage_clear.wav\"])\n\t\t\t\tBoard.bonus_round()\n\n\t\t\telif Board.board[self.x][self.y]=='K':\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_stage_clear.wav\"])\n\t\t\t\tenemy.boss_round()\n\n\t\t\telif(Board.board[self.x][self.y] in obstacles):\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y-1]='M'\n\n\t\t\telif((Board.board[self.x+1][self.y-1]=='/' or Board.board[self.x+1][self.y-1]=='T') and Board.board[self.x+1][self.y]==' '):\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y+1]='M'\n\t\t\t\tMario.go_down(self)\n\t\t\telse:\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y]='M'\n\n\t\tif( self.y-1 >= ((Board.prev_j+Board.prev_k)/2) ):\n\t\t\tos.system('clear')\n\t\t\tBoard.prev_j += 1 \n\t\t\tBoard.prev_k += 1\n\t\t\tinit_board(Board.prev_i,Board.prev_j,Board.prev_k)\n\t\telse:\n\t\t\tos.system('clear')\n\t\t\tinit_board(Board.prev_i,Board.prev_j,Board.prev_k)", "def update(self):\n # Move left/right=====\n self.rect.x += self.change_x\n self.rect.y += self.change_y\n 
visited[int(self.rect.x/32)][int(self.rect.y/32)].append(self.id)\n\n self.path.append((int(self.rect.x/32), int(self.rect.y/32)))\n\n # if(self.rect.x == goal_x) & (self.rect.y == goal_y):\n # pygame.quit()\n # sys.exit(0)\n\n self.change_x = 0\n self.change_y = 0", "def setup(self):\n\n # Sprite lists\n self.player_list = arcade.SpriteList()\n\n # https://opengameart.org/content/animated-top-down-survivor-player\n # Set up the player\n self.player_sprite = Player(\"survivor-idle_rifle_0.png\", 0.5)\n self.player_sprite.center_x = SCREEN_WIDTH / 2\n self.player_sprite.center_y = SCREEN_HEIGHT / 2\n self.player_list.append(self.player_sprite)\n self.wall_list = arcade.SpriteList()\n self.chest_list = arcade.SpriteList()\n self.bullet_list = arcade.SpriteList()\n\n self.score = 0\n\n\n # Set up the player\n # https://opengameart.org/content/animated-top-down-survivor-player\n\n\n\n # -- Set up several columns of walls\n for x in range(-700, 1700, 100):\n # sprite form https://opengameart.org/content/bush-png\n wall = arcade.Sprite(\"bush_11.png\", SPRITE_SCALING)\n wall.center_x = x\n wall.center_y = -300\n self.wall_list.append(wall)\n for x in range(-700, 1700, 100):\n # sprite form https://opengameart.org/content/bush-png\n wall = arcade.Sprite(\"bush_11.png\", SPRITE_SCALING)\n wall.center_x = x\n wall.center_y = 1025\n self.wall_list.append(wall)\n for y in range(-300, 1025, 100):\n # sprite form https://opengameart.org/content/bush-png\n wall = arcade.Sprite(\"bush_11.png\", SPRITE_SCALING)\n wall.center_x = -700\n wall.center_y = y\n self.wall_list.append(wall)\n for y in range(-300, 1025, 100):\n # sprite form https://opengameart.org/content/bush-png\n wall = arcade.Sprite(\"bush_11.png\", SPRITE_SCALING)\n wall.center_x = 1700\n wall.center_y = y\n self.wall_list.append(wall)\n\n\n# https://www.pinterest.com/pin/258042253625289337\n\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.wall_list)\n\n # Set the background color\n arcade.set_background_color(arcade.color.BRITISH_RACING_GREEN)\n\n # Set the viewport boundaries\n # These numbers set where we have 'scrolled' to.\n self.view_left = 0\n self.view_bottom = 0", "def update(self, ai_settings):\n if self.moving_up and self.rect.top > 0:\n self.rect.centery -= ai_settings.paddle_speed_factor\n\n if self.moving_down and self.rect.bottom < self.screen_rect.bottom:\n self.rect.centery += ai_settings.paddle_speed_factor\n\n if self.moving_left and self.rect.left > 0:\n self.rect.centerx -= ai_settings.paddle_speed_factor\n\n if self.moving_right and self.rect.right < self.screen_rect.centerx:\n self.rect.centerx += ai_settings.paddle_speed_factor", "def real_time_update(self, dt):\n\n self.t_minus -= dt\n\n # copy the permanent features before we add on tanks,bullets,etc\n self.board = copy.deepcopy(self.perma_board)\n\n # CURRENTLY WE CLEAR THE GHOST_BOARD EVERY FRAME\n # THIS MAY CHANGE IN THE FUTURE AS WE ADD MORE ANIMATIONS\n self.ghost_board = []\n for i in range(64):\n self.ghost_board += [[EMPTY]*64]\n\n # bullets move first thus if they get shot they can escape their mama tank\n for b in self.bullets:\n\n # move the bullet\n b.move(dt)\n pos = b.get_pixel_pos()\n x = pos[0]\n y = pos[1]\n\n # kill the bullet if it hits a wall\n if (x < 0) or (y < 0) or (x > 63) or (y > 63):\n self.bullets.remove(b)\n elif (self.board[y][x] == WALL):\n self.bullets.remove(b)\n else:\n self.board[y][x] = BULLET\n\n # then tanks move\n for k in self.tanks.keys():\n\n if k:\n\n t = self.tanks[k]\n\n t.move(dt)\n\n # check 
to see if the tank hits a wall\n positions = t.get_pixel_pos() # <-- actually 9 positions\n for p in positions:\n x = p[0]\n y = p[1]\n # if you hit a wall or go off the edge of the screen, or we hit a tank, don't move\n if (x < 0) or (y < 0) or (x > 63) or (y > 63) or (self.board[y][x] == WALL) or (self.board[y][x] < 10):\n t.move(-1.0*dt)\n break\n\n # update the pixels on the board\n positions = t.get_pixel_pos() # <-- actually 9 positions\n for p in positions:\n x = p[0]\n y = p[1]\n # if you hit a bullet:\n # find the bullet, kill it, take damage, record your aggressor\n if (self.board[y][x] == BULLET) and not t.is_dead():\n for b in self.bullets:\n b_pos = b.get_pixel_pos()\n b_x = b_pos[0]\n b_y = b_pos[1]\n\n if (x==b_x) and (y==b_y):\n bullet_id = b.ID\n self.bullets.remove(b)\n t.damage(BULLET_DM)\n t.damage_IDs += [bullet_id]\n if bullet_id in self.tanks:\n self.tanks[bullet_id].score += 1\n self.scores[bullet_id] += 1\n if t.is_dead():\n self.scomm.death_event(b.ID, t.ID)\n self.return_color(t)\n t.cleanup()\n del self.tanks[k]\n break\n\n # if you're on the hospital, heal yourself\n elif (self.board[y][x] == HOSPITAL) and (not t.recently_healed):\n t.heal(HOSPITAL_RATE, dt)\n t.recently_healed = True\n # finally set the pixel to be a tank\n self.board[y][x] = t.color\n\n # once the tank is done moving, reset so it can be healed next update\n t.recently_healed = False\n\n # if t died, reset any pixels that were written before the tank died\n if t.is_dead():\n for p in positions:\n if self.board[y][x] == t.color:\n self.board[y][x] = EMPTY\n\n # otherwise add the \"eye\" of the tank to the ghost_board\n # according to the direction in the map below, which \n # corresponds to the angle the tank is pointing\n #\n # 1 2 3\n # 0 x 4\n # 7 6 5\n #\n else:\n t_angle_scaled = (int(round(math.atan2(t.y_vel,t.x_vel)*8/(2*math.pi)))+4)%8\n eye_x = int(round(t.x_pos))\n eye_y = int(round(t.y_pos))\n if t_angle_scaled == 0:\n eye_x -= 1\n elif t_angle_scaled == 1:\n eye_x -= 1\n eye_y -= 1\n elif t_angle_scaled == 2:\n eye_y -= 1\n elif t_angle_scaled == 3:\n eye_y -= 1\n eye_x += 1\n elif t_angle_scaled == 4:\n eye_x += 1\n elif t_angle_scaled == 5:\n eye_x += 1\n eye_y += 1\n elif t_angle_scaled == 6:\n eye_y += 1\n elif t_angle_scaled == 7:\n eye_x -= 1\n eye_y += 1\n\n self.ghost_board[eye_y][eye_x] = EYE\n\n # finally draw the ghost board over the regular board\n for y in range(len(self.board)):\n for x in range(len(self.board[y])):\n ghost = self.ghost_board[y][x]\n if ghost != EMPTY:\n self.board[y][x] = ghost", "def update(self):\n self.rect.y -= self.y_speed # Pawns move up the screen at the speed specified", "def update(self):\n # Sine and Cosine work in degrees, so we have to convert them\n direction_radians = math.radians(self.direction)\n \n # Change the position (x and y) according to the speed and direction\n self.x += self.speed * math.sin(direction_radians)\n self.y -= self.speed * math.cos(direction_radians)\n \n # Move the image to where our x and y are\n self.rect.x = self.x\n self.rect.y = self.y\n \n # Do we bounce off the top of the screen?\n if self.y <= 0:\n self.bounce(0)\n self.y = 1\n \n # Do we bounce off the left of the screen?\n if self.x <= 0:\n self.direction = (360 - self.direction) % 360\n self.x = 1\n \n # Do we bounce of the right side of the screen?\n if self.x > self.screenwidth - self.width:\n self.direction = (360 - self.direction) % 360\n self.x = self.screenwidth - self.width - 1\n \n # Did we fall off the bottom edge of the screen?\n if self.y > 
600:\n return True\n else:\n return False", "def Update(self, AutoUpdate=True):\n if self.isAlive and AutoUpdate:\n Ant.antArray.append(self)\n self.TicksLeft -= 1\n if self.TicksLeft <= 0:\n self.TicksLeft = self.speedScalar-self.speed\n pix = self.display.get_at((self.x,self.y))\n if pix == Colors.A_black:\n # set current tile to white\n Ant.updateArray.append(pygame.Rect(self.x,self.y,1,1))\n self.display.set_at((self.x,self.y), Colors.A_white)\n # turn left and move\n self.MoveLeftStep()\n elif pix == Colors.A_Fire:\n self.isAlive = False\n else:\n if pix == Colors.A_Crazy:\n self.lifeSpan -= 1\n if(self.lifeSpan <= 0):\n self.isAlive = False\n # set current tile to white\n Ant.updateArray.append(pygame.Rect(self.x,self.y,1,1))\n self.display.fill(Colors.A_black, ((self.x,self.y), (1,1)))\n # turn right and move\n self.MoveRightStep()", "def start(self):\n self.running = True\n\n while self.running:\n font = pygame.font.Font(None, 30)\n text = font.render(f\"Inventory ({self.hero.inventory})\", 1, (1, 0, 0))\n control = font.render(f\"Move with:\", 1, (1, 0, 0))\n self.screen.blit(self.background, (0, 0))\n self.screen.blit(text, (10, 490))\n self.screen.blit(start, (0, 0))\n self.screen.blit(control, (320, 490))\n self.screen.blit(control_keyboard, (428, 476))\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n self.hero.move(up)\n\n elif event.key == pygame.K_DOWN:\n self.hero.move(down)\n\n elif event.key == pygame.K_RIGHT:\n self.hero.move(right)\n\n elif event.key == pygame.K_LEFT:\n self.hero.move(left)\n \n elif event.key == pygame.K_RETURN or pygame.K_KP_ENTER or pygame.K_q:\n self.running = False\n\n if self.hero.inventory == 3 and self.hero.position == (14, 14):\n self.screen.blit(won, (50, 200))\n self.hero.position = (14, 14)\n\n elif self.hero.inventory != 3 and self.hero.position == (14, 14):\n self.screen.blit(lose, (50, 200))\n self.hero.position = (14, 14)\n\n self.allsprites.update()\n self.allsprites.draw(self.screen)\n pygame.display.update()", "def update(self):\n self.x += (self.speed * self.x_direction)\n self.y += (self.speed * self.y_direction)\n self.ball_sprite.update(self.x, self.y)\n\n if self.x < 0 or (self.x + self.width) > config.WINDOW_WIDTH:\n self.x_direction *= -1\n\n if self.y < 0 or (self.y + self.height) > config.WINDOW_HEIGHT:\n self.y_direction *= -1", "def update(self):\r\n # Update the ship's center value, not the rect.\r\n if self.moving_right and self.rect.right < self.screen_rect.right/2:\r\n self.centerx += self.settings.player_speed\r\n if self.moving_left and self.rect.left > 0:\r\n self.centerx -= self.settings.player_speed\r\n if self.moving_down and self.rect.bottom < self.screen_rect.bottom:\r\n self.centery += self.settings.player_speed\r\n if self.moving_up and self.rect.top > 0:\r\n self.centery -= self.settings.player_speed\r\n \r\n # Update rect object from self.center.\r\n self.rect.centerx = self.centerx\r\n self.rect.centery = self.centery", "def move(self):\r\n piece = []\r\n if self.direction == \"UP\":\r\n piece = [self.body[0][0], self.body[0][1] - self.POS_CHANGE] # create piece at new coordinates\r\n elif self.direction == \"DOWN\":\r\n piece = [self.body[0][0], self.body[0][1] + self.POS_CHANGE]\r\n elif self.direction == \"LEFT\":\r\n piece = [self.body[0][0] - self.POS_CHANGE, self.body[0][1]]\r\n elif self.direction == \"RIGHT\":\r\n piece = [self.body[0][0] + self.POS_CHANGE, self.body[0][1]]\r\n\r\n if 
piece:\r\n if piece in self.body: # Lose game if snake touches itself\r\n self.alive = False\r\n else:\r\n self.body.insert(0, piece) # insert new piece at head of snake\r\n if len(self.body) > self.length:\r\n self.body.pop() # delete last piece of snake, if length isnt increased\r\n\r\n self.draw_snake()", "def syncSpriteCoordinates(self):\n oldSpriteCoords=self.canvasIGetDrawnOn.coords(self.spriteOnCanvas)\n deltaCoords=[self.xPos-oldSpriteCoords[0],self.yPos-oldSpriteCoords[1]]\n self.canvasIGetDrawnOn.move(self.spriteOnCanvas,*deltaCoords)", "def move(self, direction):\n\n # Move to the right\n if direction == 'right':\n if self.square_x < (NUMBER_OF_SPRITES -1):\n # Checking if the new direction is not a wall\n if self.maze.structure[self.square_y][self.square_x + 1] != 'w':\n # Move by one square right on X axis\n self.square_x += 1\n # Calculation of the \"Real\" positioning in pixels\n self.x_pos = self.square_x * SPRITES_SIZE\n\n # Move to the left\n if direction == 'left':\n if self.square_x > 0:\n # Checking if the new direction is not a wall\n if self.maze.structure[self.square_y][self.square_x - 1] != 'w':\n # Move by one square left on X axis\n self.square_x -= 1\n # Calculation of the \"Real\" positioning in pixels\n self.x_pos = self.square_x * SPRITES_SIZE\n\n # Move up\n if direction == 'up':\n if self.square_y > 0:\n # Checking if the new direction is not a wall\n if self.maze.structure[self.square_y-1][self.square_x] != 'w':\n # Move by one square up on Y axis\n self.square_y -= 1\n # Calculation of the \"Real\" positioning in pixels\n self.y_pos = self.square_y * SPRITES_SIZE\n\n # Move down\n if direction == 'down':\n if self.square_y < (NUMBER_OF_SPRITES -1):\n # Checking if the new direction is not a wall\n if self.maze.structure[self.square_y + 1][self.square_x] != 'w':\n # Move by one square down on Y axis\n self.square_y += 1\n # Calculation of the \"Real\" positioning in pixels\n self.y_pos = self.square_y * SPRITES_SIZE", "def update(self):\r\n # Desplaza el bloque un píxel hacia abajo. 
s\r\n if self.rect.left < 50 or self.rect.right > 600:\r\n self.speed[0] = -self.speed[0]\r\n if self.rect.top < 0 or self.rect.bottom > 200:\r\n self.speed[1] = -self.speed[1]\r\n self.rect.move_ip((self.speed[0], self.speed[1])) \r\n if self.rect.y > 500:\r\n self.rect.x = random.randrange(10,600) \r\n self.rect.y = random.randrange(-400,-200)\r\n self.rect.y += 5", "def run(self):\n\n # avviso l'utente sul livello che sta per partire il livello di\n # gioco corrente\n #self.drawMessage(\" Livello %s \" % (self.levelIndex + 1), 2000)\n self.state = GameState.PLAYING\n\n # variabili per la gestione del tempo\n # del corrente livello di gioco\n self.start = time.time()\n self.remaining_time = self.time\n pause_time = 0\n\n # incremento della velocita del giocatore\n # che aumento se si tiene premuta la barra spaziatrice\n vel_inc = 0\n\n # memorizzo il tipo di movimento da far eseguire al player\n player_move_action = self.player.sprite.stop\n\n\n # ciclo principale del gioco\n while not self.done:\n\n # gestione degli eventi\n for ev in pygame.event.get():\n\n if ev.type == QUIT:\n self.done = True\n elif ev.type == KEYDOWN:\n if ev.key == K_q:\n self.done = True\n\n # verifico per lo sparo\n elif ev.key == K_LCTRL:\n self.player.sprite.shot(self.player_bullets_filename, self.playerBullets, self.sound_weapon)\n elif ev.key == K_SPACE:\n vel_inc = 1\n elif ev.key == K_LEFT:\n player_move_action = self.player.sprite.moveLeft\n elif ev.key == K_RIGHT:\n player_move_action = self.player.sprite.moveRight\n elif ev.key == K_UP:\n player_move_action = self.player.sprite.moveUp\n elif ev.key == K_DOWN:\n player_move_action = self.player.sprite.moveDown\n elif ev.key == K_r:\n self.setupLevel()\n self.run()\n elif ev.key == K_n:\n self.levelIndex += 1\n self.setupLevel()\n self.run()\n elif ev.key == K_b:\n self.levelIndex -= 1\n self.setupLevel()\n self.run()\n elif ev.key == K_p or ev.key==K_h:\n if self.state == GameState.PLAYING:\n # registro l'istante in cui è avvenuta la pausa\n pause_time = time.time()\n if ev.key==K_p:\n self.state = GameState.PAUSED\n else:\n self.state = GameState.HELP\n else:\n # ricavo il tempo passato da quando sono in pausa\n dtime = time.time() - pause_time\n # aumento il numero di secondi del tempo iniziale\n # con quelli trascorsi durante la pausa\n self.start += dtime\n self.state = GameState.PLAYING\n print(\"Stato:%s\" % self.state)\n elif ev.type == KEYUP:\n if ev.key == K_SPACE:\n vel_inc = 0\n elif ev.key != K_LCTRL:\n player_move_action =self.player.sprite.stop\n\n # se ho un joystick collegato lo uso per gli spostamenti\n if self.joystick!=None:\n velx = self.joystick.get_axis(0)\n vely = self.joystick.get_axis(1)\n #print(\"%s %s\" % (velx,vely))\n self.player.sprite.move(velx,vely)\n # controllo se ho premuto un pulsante per lo sparo\n if ev.type == pygame.JOYBUTTONDOWN:\n self.player.sprite.shot(self.player_bullets_filename, self.playerBullets, self.sound_weapon)\n # altrimenti uso le informazioni ricevute dalla tastiera\n else:\n # aziono il movimento del giocatore sulla base\n # della combinazione dei tasti premuti\n player_move_action(vel_inc)\n\n # aggiorno la posizione del giocatore e\n # dei nemici, verifico collisioni e poteri\n # solo se sono in fase di gioco\n\n if self.state == GameState.PLAYING:\n self.remaining_time = self.get_remaining_time()\n if self.remaining_time <= 0:\n self.removeLife()\n\n # aggiorno le posizioni di tutti gli sprite\n self.updateAllSpritesPositions()\n\n # gestisco le collisioni di tutti gli sprite\n 
self.handleAllSpritesCollisions()\n\n # gestisco i vari poteri del player\n self.handlePowers()\n\n\n # verifico se ho, in un modo o nell'altro.\n # rimosso tutte le monete..in tal caso\n # dichiaro il livello completato\n if len(self.coins.sprites()) ==0:\n if self.levelIndex>=len(levels)-1:\n self.state = GameState.PLAYER_WON\n self.done = True\n else:\n self.state = GameState.LEVEL_COMPLETED\n\n #\n # Aggiornamento dello schermo\n #\n\n #cancello lo schermo\n self.screen.fill((0, 0, 0))\n\n\n # il labirinto lo inserisco nella superficie scrollabile\n self.scrollSurface.blit(self.mazeSurf,self.mazeSurf.get_rect())\n\n # disegno tutti gli sprite di gioco\n self.drawAllSprites()\n\n\n # centro la superficie del labirinto rispetto al centro del giocatore\n sc_x = self.screen.get_rect().center[0] - self.player.sprite.rect.center[0]\n sc_y = self.screen.get_rect().center[1] - self.player.sprite.rect.center[1]\n scrollSurfaceRect = Rect((sc_x,sc_y+self.gamebarSurface.get_rect().height),(self.scrollSurface.get_rect().width, self.scrollSurface.get_rect().height))\n\n if (self.background_image!=None):\n self.screen.blit(self.background_image,(0,0))\n self.screen.blit(self.scrollSurface, scrollSurfaceRect)\n\n # disegno la barra di informazioni di gioco\n self.drawNewGamebarSurface()\n\n # gestisco la logica di gioco sulla base dello stato corrente\n self.handleGameState()\n\n # riporto tutto a video\n # n.b: se ometto la seguente istruzione non vedo nulla!\n pygame.display.flip()\n\n # scandisco la velocità del gioco\n self.clock.tick(self.clockTime)\n\n\n # --- Uscita dal gioco\n pygame.mixer.music.stop()\n while pygame.mixer.get_busy():\n self.clock.tick(30)\n\n print(\"Uscita\")\n pygame.quit()\n sys.exit()", "def update(self):\n if self.runningForward or self.runningBackward:\n self.imagecounter +=1\n if self.imagecounter > 7:\n self.imagecounter = 0\n else: \n imagecounter=0\n self.image = pygame.image.load(self.pictures[self.imagecounter])\n if self.flip:\n self.image = transform.flip(self.image,True,False)\n self.rect = self.image.get_rect()\n self.rect.left = self.x\n self.rect.top = self.y\n\n if self.hittingCeilling or self.onGround:\n self.fx = self.ff\n self.fy = self.fg + self.fn\n else:\n self.fx = self.ff + self.fn\n self.fy = self.fg\n\n self.vx += self.fx/self.mass\n self.vy += self.fy/self.mass\n\n if self.vy > self.terminalVelocity:\n self.vy = self.terminalVelocity\n\n self.lastX = self.x\n self.lastY = self.y\n \n self.x += int(self.vx)\n self.y += int(self.vy)\n\n self.onGround = False\n self.hittingCeilling = False\n self.hittingWallRight = False\n self.hittingWallLeft = False\n\n if self.lastVx != 0:\n if self.vx/self.lastVx < 0:\n self.flip = not self.flip\n\n self.rect.center = ((int(self.x)),int(self.y))", "def update(self):\n # update the float of bullet position\n self.y -= self.speed_factor\n # update the bullet position\n self.rect.y = self.y", "def update_sprite(self):\n rotate = False\n state = self.state_machine.current\n if state == 'offload':\n self.image = sprite_bank.retrieve(\"bee_hidden_sprite\")\n elif state == 'harvest' and self.harvesting_pollen:\n self.wings_up = False\n self.image = self.wings_down_sprite\n else:\n\n if self.wings_up:\n self.image = self.wings_down_sprite\n self.wings_up = False\n else:\n self.image = self.wings_up_sprite\n self.wings_up = True\n\n if self.animation_loop == animation_fps:\n self.animation_loop = 0\n else:\n self.animation_loop = self.animation_loop + 1\n rotate = True\n\n if rotate:\n angle = 270 - 
math.atan2(self.target_destination.y - self.location.y,\n self.target_destination.x - self.location.x) * 180 / math.pi\n self.image = transform.rotate(self.image, angle)\n\n self.rect.width = self.image.get_rect().width\n self.rect.height = self.image.get_rect().height\n\n self.location = Vector2(self.rect.left + self.rect.width / 2,\n self.rect.top + self.rect.height / 2)", "def update(self):\r\n # Update the center value of the ship instead of rect\r\n if self.moving_right and self.rect.right < self.screen_rect.right:\r\n self.center += self.ai_settings.ship_speed_factor\r\n if self.moving_left and self.rect.left > 0:\r\n self.center -= self.ai_settings.ship_speed_factor\r\n # according to self.center Update rect object\r\n self.rect.centerx = self.center", "def update(self, delta_time):\n\n if self.joystick_1:\n print(self.joystick_1.x, self.joystick_1.y)\n if abs(self.joystick_1.x) < DEAD_ZONE:\n self.player_sprite.change_x = 0\n else:\n self.player_sprite.change_x = self.joystick_1.x * MOVEMENT_SPEED\n\n if abs(self.joystick_1.y) < DEAD_ZONE:\n self.player_sprite.change_y = 0\n else:\n self.player_sprite.change_y = -self.joystick_1.y * MOVEMENT_SPEED\n\n self.player_list.update()\n\n self.bullet_list.update()\n\n hit_list = arcade.check_for_collision_with_list(self.player_sprite,\n self.chest_list\n )\n\n for chest in hit_list:\n chest.remove_from_sprite_lists()\n self.score += 1\n\n # Call update on all sprites (The sprites don't do much in this\n # example though.)\n self.physics_engine.update()\n\n # --- Manage Scrolling ---\n\n # Keep track of if we changed the boundary. We don't want to call the\n # set_viewport command if we didn't change the view port.\n changed = False\n\n # Scroll left\n left_boundary = self.view_left + VIEWPORT_MARGIN\n if self.player_sprite.left < left_boundary:\n self.view_left -= left_boundary - self.player_sprite.left\n changed = True\n\n # Scroll right\n right_boundary = self.view_left + SCREEN_WIDTH - VIEWPORT_MARGIN\n if self.player_sprite.right > right_boundary:\n self.view_left += self.player_sprite.right - right_boundary\n changed = True\n\n # Scroll up\n top_boundary = self.view_bottom + SCREEN_HEIGHT - VIEWPORT_MARGIN\n if self.player_sprite.top > top_boundary:\n self.view_bottom += self.player_sprite.top - top_boundary\n changed = True\n\n # Scroll down\n bottom_boundary = self.view_bottom + VIEWPORT_MARGIN\n if self.player_sprite.bottom < bottom_boundary:\n self.view_bottom -= bottom_boundary - self.player_sprite.bottom\n changed = True\n\n # Make sure our boundaries are integer values. While the view port does\n # support floating point numbers, for this application we want every pixel\n # in the view port to map directly onto a pixel on the screen. 
We don't want\n # any rounding errors.\n self.view_left = int(self.view_left)\n self.view_bottom = int(self.view_bottom)\n\n # If we changed the boundary values, update the view port to match\n if changed:\n arcade.set_viewport(self.view_left,\n SCREEN_WIDTH + self.view_left - 1,\n self.view_bottom,\n SCREEN_HEIGHT + self.view_bottom - 1)\n\n if hit_list:\n arcade.play_sound(self.chest_sound)", "def update(self, pressed_keys):\r\n # read key presses in event log and change position accordingly\r\n if pressed_keys[K_UP]:\r\n if self.direction == \"down\":\r\n pass\r\n else:\r\n self.yChange = -block\r\n self.xChange = 0\r\n self.direction = \"up\"\r\n self.surf = pygame.transform.scale(self.image[0], (block, block))\r\n if pressed_keys[K_DOWN]:\r\n if self.direction == \"up\":\r\n pass\r\n else:\r\n self.yChange = block\r\n self.xChange = 0\r\n self.direction = \"down\"\r\n self.surf = self.imgD\r\n if pressed_keys[K_LEFT]:\r\n if self.direction == \"right\":\r\n pass\r\n else:\r\n self.xChange = -block\r\n self.yChange = 0\r\n self.direction = \"left\"\r\n self.surf = self.imgL\r\n if pressed_keys[K_RIGHT]:\r\n if self.direction == \"left\":\r\n pass\r\n else:\r\n self.xChange = block\r\n self.yChange = 0\r\n self.direction = \"right\"\r\n self.surf = self.imgR\r\n\r\n # when snake passes the boundaries of the screen it will loop through to the opposite side\r\n if self.x >= dis_width:\r\n self.x = 0\r\n if self.x < 0:\r\n self.x = dis_width\r\n if self.y >= dis_height:\r\n self.y = 0\r\n if self.y < 0:\r\n self.y = dis_height\r\n\r\n # add the direction change based on button press\r\n self.x += self.xChange\r\n self.y += self.yChange\r\n\r\n self.head = []\r\n self.head.append(self.x)\r\n self.head.append(self.y)\r\n self.head.append(self.direction)\r\n self.list.append(self.head)\r\n\r\n #if list has more items than the length of snake delete first item in list\r\n if len(self.list) > self.length:\r\n del self.list[0]", "def update(self):\n self.wall_list.update()\n self.enemy_list.update()\n self.sludge.update()\n self.consumeable.update()\n self.can_climb.update()", "def gameloop(self):\n\n #Game folder\n game_folder = os.path.dirname(__file__)\n img_folder = os.path.join(game_folder, 'img')\n Background = pygame.image.load(os.path.join('Files', img_folder, 'background.jpg')).convert()\n\n # Clock/Timers\n clock = pygame.time.Clock() # Initiate clock object \n\n # Assign object sprite groups.\n fuel_sprites = pygame.sprite.Group()\n player_sprites = pygame.sprite.Group()\n bullet_sprites = pygame.sprite.Group()\n wall_sprites = pygame.sprite.Group()\n\n # Instancing players.\n player1 = Player1()\n player2 = Player2()\n\n # Instancing walls\n L_wall = Wall(5, HEIGHT_SCREEN/2, 10, HEIGHT_SCREEN)\n R_wall = Wall(WIDTH_SCREEN-5, HEIGHT_SCREEN/2, 10, HEIGHT_SCREEN)\n T_wall = Wall(WIDTH_SCREEN/2, 5, WIDTH_SCREEN, 10)\n B_wall = Wall(WIDTH_SCREEN/2,HEIGHT_SCREEN-5, WIDTH_SCREEN, 10)\n Mid_wall = Wall(WIDTH_SCREEN/2,HEIGHT_SCREEN/2, 10, HEIGHT_SCREEN/4)\n \n # add to sprite groups\n player_sprites.add(player1, player2)\n wall_sprites.add(L_wall, R_wall, T_wall, B_wall, Mid_wall)\n\n # Initiate timer for the fuel spawn\n fueltimer = time.time()\n\n # Start the loop\n while True:\n #Checks for events and if pressed the X in the corner the program will quit\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n # Show logged functions, see Diagnostics/timing.py\n Profiler.showprofile()\n exit()\n \n # Lock fps\n time_passed = clock.tick(FPS)\n time_passed_seconds = 
time_passed/1000.0\n \n # Spawn fuel barrels\n timer = time.time()\n if fueltimer < timer:\n fueltimer += FUEL_TIMER\n fuel_sprites.add(FuelBarrel())\n\n\n # Check for collisions between sprites.\n collide = pygame.sprite.groupcollide(player_sprites, fuel_sprites, 0 , 1)\n collide_wall = pygame.sprite.groupcollide(player_sprites, wall_sprites, 1 , 0)\n collide_wall_bullets = pygame.sprite.groupcollide(bullet_sprites, wall_sprites, 1 , 0)\n collide_players_bullets = pygame.sprite.groupcollide(player_sprites, bullet_sprites ,1 , 1)\n collide_players_players = pygame.sprite.groupcollide(player_sprites, player_sprites ,0 , 0)\n\n # Assign negative points to both players if colliding\n for player in collide_players_players:\n if player != collide_players_players[player][0]:\n pygame.sprite.groupcollide(player_sprites, player_sprites ,1 , 1)\n self.point_assigner_loss(collide_players_players, player_sprites)\n\n # Assign Positive points to player which hit the other player\n self.point_assigner_win(collide_players_bullets, player_sprites)\n \n # Assign negative points for wall collision and if hit by player bullets\n self.point_assigner_loss(collide_players_bullets, player_sprites)\n self.point_assigner_loss(collide_wall, player_sprites)\n\n\n # Set background\n self.background(Background)\n\n # Update all the sprites\n player_sprites.update(collide, bullet_sprites)\n player_sprites.draw(SCREEN)\n fuel_sprites.update()\n fuel_sprites.draw(SCREEN)\n bullet_sprites.update()\n bullet_sprites.draw(SCREEN)\n wall_sprites.update()\n wall_sprites.draw(SCREEN)\n \n # Update screen\n pygame.display.update()", "def update(self, players):\n # if self._tick % 75 == 0:\n # pos = Vector2(100 + self._tick % 1240, -200)\n # radius = 50 + self._tick % 200\n # dir = Vector2(-5.5 + self._tick % 9, 2 + self._tick % 5)\n if self._tick % 25 == 0:\n pos = Vector2(((self._tick / 25) * 100) % 505, -100)\n radius = 50\n dir = Vector2(0, 4)\n self.obstacles.append(Obstacle(pos, radius, dir))\n\n self.obstacles = [\n Obstacle(add(obstacle.pos, obstacle.dir), obstacle.radius, obstacle.dir)\n for obstacle in self.obstacles\n if obstacle.pos.y < 550\n ]\n\n self._tick = self._tick + 1", "def runMaze(mazze, rectangles):\n global lvl\n global mobsKilled\n startx, starty, endx, endy = 0, 0, 0, 0\n startx, starty, endx, endy = startEndPoints(mazze)\n screen = pygame.display.set_mode((600,600,)) #pygame display object\n all_entities = pygame.sprite.LayeredUpdates() #sprite group of all sprites\n walls = pygame.sprite.LayeredUpdates() #sprite group for the walls\n players = pygame.sprite.GroupSingle() #sprite group for the player\n monstors = pygame.sprite.LayeredUpdates() #sprite group for the monsters\n exit = pygame.sprite.GroupSingle() #sprite group for the exit point\n floors = pygame.sprite.LayeredUpdates() #sprite group for the floors\n entry = pygame.sprite.GroupSingle() #sprite group for the entry point\n daggers = pygame.sprite.GroupSingle() #sprite group for the attacks\n done = False\n\n mobList = npc.main(rectangles, lvl)\n mobCounter = 0\n for i in mobList:\n i.setListPos(mobCounter)\n mobCounter += 1\n # i.displayStats()\n\n clock = pygame.time.Clock()\n\n x = copy.deepcopy(startx)\n y = copy.deepcopy(starty)\n\n total_level_width = 3000\n total_level_height = 3000\n global facing\n camera = Camera(complex_camera, total_level_width, total_level_height) #camera object used to generate the offsets\n playerOne = Player(x, y, facing) #player object and sprite\n playerOne.add(players)\n while not done:\n try:\n 
pygame.event.pump()\n except:\n None\n for event in pygame.event.get():\n pressed = pygame.key.get_pressed()\n if event.type == pygame.QUIT:\n done = True\n print(\"You Killed \" + str(mobsKilled) + \" Monsters.\")\n highScores.setHighScore( lvl-1, mobsKilled)\n pygameMazeDraw(screen, mazze, y, x, mobList, walls, monstors, exit, floors, entry) #generates the necessary sprites and objects to later be displayed\n pressed = pygame.key.get_pressed()\n if pressed[pygame.K_w] or pressed[pygame.K_s] or pressed[pygame.K_a] or pressed[pygame.K_d]:\n if pressed[pygame.K_w]:\n testSprite = Player(x,(y-1), facing)\n if pygame.sprite.spritecollideany(testSprite, walls) is not None:\n # print('wcollide')\n continue\n elif pygame.sprite.spritecollideany(testSprite, exit) is not None:\n screen.blit(loading, (0,0,))\n pygame.display.flip()\n return main()\n # elif len(pygame.sprite.spritecollide(testSprite, monstors, dokill=True)) > 0:\n # print('monstCollide')\n # y -= 1\n # #testSprite.add(players)\n # players.update(testSprite.pos)\n else:\n y -= 1\n #testSprite.add(players)\n players.update(testSprite.pos, facing)\n elif pressed[pygame.K_s]:\n testSprite = Player(x,(y+1), facing)\n if pygame.sprite.spritecollideany(testSprite, walls) is not None:\n # print('scollide')\n continue\n elif pygame.sprite.spritecollideany(testSprite, exit) is not None:\n screen.blit(loading, (0,0,))\n pygame.display.flip()\n return main()\n # elif len(pygame.sprite.spritecollide(testSprite, monstors, dokill=True)) > 0:\n # print('monstCollide')\n # y += 1\n # #testSprite.add(players)\n # players.update(testSprite.pos)\n else:\n y += 1\n #testSprite.add(players)\n players.update(testSprite.pos, facing)\n elif pressed[pygame.K_a]:\n testSprite = Player((x-1),y, 'left')\n if pygame.sprite.spritecollideany(testSprite, walls) is not None:\n # print('acollide')\n continue\n elif pygame.sprite.spritecollideany(testSprite, exit) is not None:\n screen.blit(loading, (0,0,))\n pygame.display.flip()\n return main()\n # elif len(pygame.sprite.spritecollide(testSprite, monstors, dokill=True)) > 0:\n # print('monstCollide')\n # x -= 1\n # #testSprite.add(players)\n # players.update(testSprite.pos)\n else:\n x -= 1\n #testSprite.add(players)\n players.update(testSprite.pos, 'left')\n elif pressed[pygame.K_d]:\n testSprite = Player((x+1),y, 'right')\n if pygame.sprite.spritecollideany(testSprite, walls) is not None:\n # print('dcollide')\n continue\n elif pygame.sprite.spritecollideany(testSprite, exit) is not None:\n screen.blit(loading, (0,0,))\n pygame.display.flip()\n return main()\n # elif len(pygame.sprite.spritecollide(testSprite, monstors, dokill=True)) > 0:\n # print('monstCollide')\n # x += 1\n # #testSprite.add(players)\n # players.update(testSprite.pos)\n else:\n x += 1\n #testSprite.add(players)\n players.update(testSprite.pos, 'right')\n (players.sprite).add(daggers)\n if pressed[pygame.K_UP] or pressed[pygame.K_DOWN] or pressed[pygame.K_LEFT] or pressed[pygame.K_RIGHT] or pressed[pygame.K_RSHIFT]:\n if pressed[pygame.K_UP] or pressed[pygame.K_RSHIFT]:\n dagger = Dagger(x, y-1, 'up')\n dagger.add(daggers)\n pygame.sprite.groupcollide(daggers, walls, True, False)\n #pygame.sprite.groupcollide(daggers, monstors, True, True)\n if len(daggers.sprites()) > 0:\n coll = pygame.sprite.spritecollideany(daggers.sprite, monstors)\n if coll is not None:\n mobList[coll.monst.listPos] = None\n mobsKilled += 1\n pygame.display.set_caption(\"Dungeon Crawlers\" + \" \" + \"Monsters Killed: \" + str(mobsKilled) + \" Current Floor: \" + 
str(lvl))\n pygame.sprite.groupcollide(daggers, monstors, False, True)\n # pygame.sprite.groupcollide(daggers, walls, True, False)\n # pygame.sprite.groupcollide(daggers, monstors, True, True)\n elif pressed[pygame.K_DOWN]:\n dagger = Dagger(x, y+1, 'down')\n dagger.add(daggers)\n pygame.sprite.groupcollide(daggers, walls, True, False)\n #pygame.sprite.groupcollide(daggers, monstors, True, True)\n if len(daggers.sprites()) > 0:\n coll = pygame.sprite.spritecollideany(daggers.sprite, monstors)\n if coll is not None:\n mobList[coll.monst.listPos] = None\n mobsKilled += 1\n pygame.display.set_caption(\"Dungeon Crawlers\" + \" \" + \"Monsters Killed: \" + str(mobsKilled) + \" Current Floor: \" + str(lvl))\n pygame.sprite.groupcollide(daggers, monstors, False, True)\n # pygame.sprite.groupcollide(daggers, walls, True, False)\n # pygame.sprite.groupcollide(daggers, monstors, True, True)\n elif pressed[pygame.K_LEFT]:\n dagger = Dagger(x-1, y, 'left')\n dagger.add(daggers)\n pygame.sprite.groupcollide(daggers, walls, True, False)\n #pygame.sprite.groupcollide(daggers, monstors, True, True)\n if len(daggers.sprites()) > 0:\n coll = pygame.sprite.spritecollideany(daggers.sprite, monstors)\n if coll is not None:\n mobList[coll.monst.listPos] = None\n mobsKilled += 1\n pygame.display.set_caption(\"Dungeon Crawlers\" + \" \" + \"Monsters Killed: \" + str(mobsKilled) + \" Current Floor: \" + str(lvl))\n pygame.sprite.groupcollide(daggers, monstors, False, True)\n # pygame.sprite.groupcollide(daggers, walls, True, False)\n # pygame.sprite.groupcollide(daggers, monstors, True, True)\n elif pressed[pygame.K_RIGHT]:\n dagger = Dagger(x+1, y, 'right')\n dagger.add(daggers)\n pygame.sprite.groupcollide(daggers, walls, True, False)\n if len(daggers.sprites()) > 0:\n coll = pygame.sprite.spritecollideany(daggers.sprite, monstors)\n if coll is not None:\n mobList[coll.monst.listPos] = None\n mobsKilled += 1\n pygame.display.set_caption(\"Dungeon Crawlers\" + \" \" + \"Monsters Killed: \" + str(mobsKilled) + \" Current Floor: \" + str(lvl))\n pygame.sprite.groupcollide(daggers, monstors, False, True)\n # pygame.sprite.groupcollide(daggers, walls, True, False)\n # pygame.sprite.groupcollide(daggers, monstors, True, True)\n\n #opens up the settings screen but need a way to clear it\n if pressed[pygame.K_o]:\n done = settingsscreen.main()\n # if done==True:\n # print(\"You Killed \" + str(mobsKilled) + \" Monsters.\")\n # highScores.setHighScore( lvl-1, mobsKilled)\n\n all_entities.add(walls)\n if daggers.sprite is None:\n all_entities.add(players)\n all_entities.add(daggers)\n all_entities.add(monstors)\n all_entities.add(exit)\n all_entities.add(floors)\n all_entities.add(entry)\n camera.update(players.sprite)\n #all_entities.draw(screen)\n for e in all_entities:\n screen.blit(e.image, camera.apply(e)) #Applies the offsets to the sprites and draws them to the screen\n\n screen.convert_alpha()\n pygame.display.flip() #updates the screen to show the changes\n all_entities.empty()\n walls.empty()\n floors.empty()\n\n\n clock.tick(100)\n screen.fill((0,0,0,))\n #pygame.event.clear()\n #I don't know where to put this that it would work\n # print(\"You Killed \" + str(mobsKilled) + \" Monsters.\")\n # highScores.setHighScore( lvl-1, mobsKilled)", "def move(self):\n assert self.is_alive, \"Sprite is dead, and should not be able to move\"\n if self.health > 3:\n self.y += random.randint(-1, 1) # change by -1, 0, 1\n self.x += random.randint(-1, 1) # change by -1, 0, 1\n print(self.name, \"moves to position\", 
str(self.x), \",\", str(self.y))", "def update():\n # Initialization (only runs on start/restart)\n player1 = Player()\n player2 = Player()\n\n walls, start = parse_level(LEVEL)\n\n player1.idx = 1\n player1.key_up = \"w\"\n player1.key_down = \"s\"\n player1.key_left = \"a\"\n player1.key_right = \"d\"\n player1.key_small = \"f\"\n player1.key_shoot = \"c\"\n\n player2.idx = 2\n player2.key_up = \"i\"\n player2.key_down = \"k\"\n player2.key_left = \"j\"\n player2.key_right = \"l\"\n player2.key_small = \"h\"\n player2.key_shoot = \"n\"\n\n reset_players = True\n # Main update loop\n while True:\n if reset_players:\n reset_players = False\n player1.centerx = start[0][0]\n player1.centery = start[0][1]\n player2.centerx = start[1][0]\n player2.centery = start[1][1]\n shots.clear()\n\n update_player(player1, delta())\n update_player(player2, delta())\n\n to_remove = []\n for shot in shots:\n if not update_shot(shot, delta()):\n to_remove.append(shot)\n for shot in to_remove:\n shots.remove(shot)\n\n draw_player(player1)\n draw_player(player2)\n for shot in shots:\n draw_shot(shot)\n\n for shot in shots:\n for other in shots:\n if shot.shooter_idx != other.shooter_idx:\n _, depth = overlap_data(shot, other)\n if depth > 0:\n to_remove.append(shot)\n for player in (player1, player2):\n _, depth = overlap_data(player, shot)\n if depth > 0 and player.idx != shot.shooter_idx:\n print(f\"{player.idx} ded by {shot.shooter_idx}\")\n reset_players = True\n for shot in to_remove:\n shots.remove(shot)\n\n for wall in walls:\n window = pg.display.get_surface()\n pg.draw.rect(window, pg.Color(100, 100, 100), wall)\n for player in (player1, player2):\n player.velocity, wall_vel, overlap = solve_rect_overlap(player,\n wall,\n player.velocity,\n mass_b=0,\n bounce=0.1)\n\n # Main loop ends here, put your code above this line\n yield", "def play_step(self, action):\n self.players[0].moving_left = False\n self.players[0].moving_right = False\n if action == MOVE_LEFT:\n self.players[0].moving_left = True\n for i in range(LOOP_AT_EACH_MOVE_UPDATE):\n self.update(is_a_star=True)\n if self.dead_player or not self.players[0].is_alive:\n break\n self.players[0].moving_left = False\n if self.dead_player or not self.players[0].is_alive:\n return\n elif action == MOVE_RIGHT:\n self.players[0].moving_right = True\n for i in range(LOOP_AT_EACH_MOVE_UPDATE):\n self.update(is_a_star=True)\n if self.dead_player or not self.players[0].is_alive:\n break\n self.players[0].moving_right = False\n if self.dead_player or not self.players[0].is_alive:\n return\n elif action == SHOOT:\n if self.dead_player or not self.players[0].is_alive:\n self.update(is_a_star=True)\n return\n if not self.players[0].weapon.is_active:\n self.players[0].shoot()\n for i in range(LOOP_AT_EACH_MOVE_UPDATE):\n self.update(is_a_star=True)\n if self.dead_player or not self.players[0].is_alive:\n break\n if self.dead_player or not self.players[0].is_alive:\n return", "def update(self):\r\n #calling animate function\r\n self.animate()\r\n #set vertical acceleration according to gravity constant\r\n self.acc = vec(0,PLAYERGRAV)\r\n \r\n #setting acceleration based on direction\r\n if self.moveLeft and self.rect.left > 0:\r\n self.acc.x = -PLAYERACC\r\n elif self.moveRight and self.rect.right < WINDOWWIDTH:\r\n self.acc.x = PLAYERACC\r\n\r\n #accelerate in x direction and slow down due to friction\r\n self.acc.x += self.vel.x*PLAYERFRIC\r\n self.vel += self.acc\r\n self.pos += self.vel + 0.5 *self.acc\r\n\r\n #position referenced from bottom of player\r\n 
self.rect.midbottom = self.pos" ]
[ "0.81336796", "0.7562641", "0.7411709", "0.73849124", "0.7197949", "0.7156386", "0.7143857", "0.7113507", "0.70476794", "0.7022667", "0.69586724", "0.695298", "0.6948688", "0.6880869", "0.6738882", "0.67356026", "0.6731809", "0.67010486", "0.6692721", "0.66918117", "0.66905886", "0.6677725", "0.6671822", "0.66379815", "0.6630717", "0.66296643", "0.662621", "0.66215587", "0.6598758", "0.6595843", "0.65952086", "0.6575588", "0.656839", "0.65629137", "0.6562573", "0.65556777", "0.6535348", "0.6522976", "0.65198517", "0.6519479", "0.650153", "0.6491989", "0.6491737", "0.6486866", "0.6485798", "0.64814174", "0.64766437", "0.64651376", "0.6459877", "0.64460385", "0.64418405", "0.6436542", "0.64352524", "0.6430505", "0.64196706", "0.6419658", "0.64139307", "0.6413322", "0.6408682", "0.6403692", "0.6403107", "0.64004225", "0.6399674", "0.63879913", "0.6376613", "0.6364956", "0.635964", "0.63596386", "0.63430476", "0.63412637", "0.6332842", "0.6329178", "0.6327438", "0.6309527", "0.6297785", "0.6297719", "0.6294092", "0.6293838", "0.62873733", "0.6287176", "0.6282469", "0.6281546", "0.62748796", "0.62726426", "0.62720937", "0.6271433", "0.62614113", "0.62580574", "0.62564135", "0.6256005", "0.6247931", "0.6233807", "0.62334114", "0.6225179", "0.62247694", "0.6224654", "0.6216944", "0.6211317", "0.62035143", "0.6202645", "0.6196686" ]
0.0
-1
Called whenever a key on the keyboard is pressed.
def on_key_press(self, key, modifiers):
    if not self.game.is_running:
        return

    if key == arcade.key.R:
        self.reset_level()

    try:
        game_key = KEY_MAP[key]
    except KeyError:
        return

    self.game.on_key_press(game_key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _on_key_press(self, event):", "def on_key_event(self, key):\n pass", "def key_press_event(self, event):\n pass", "def _on_key_release(self, event):", "def keyPressEvent(self, event):\n self.game_engine.input_manager.keyPressEvent(event)", "def on_press(key):\n try:\n # gets pressed key char value and searches it from dict with get method.\n mapped_key = key_mappings.get(key.char) # gets value and type tuple or None\n if mapped_key:\n module.pressed_key = mapped_key\n except AttributeError:\n traceback.print_exc()\n except KeyboardInterrupt:\n print(f\"\\n{module.current_time()} Application stopped\")", "def ev_KEYUP(self, event):", "def on_key_press(self, key, modifiers):\n player_controller.input_press(self, key, self.player)", "def key_event(self, key: Any, action: Any):\n pass", "def k_press(self, key: KKey):\n pass", "def on_key_press(self, event):\n\n #print(\"you pressed {}\".format(event.key))\n key_press_handler(event, self.canvas, self.toolbar)", "def ev_KEYDOWN(self, event):", "def _handle_key_press(self, event: pygame.event.Event) -> None:\n if event.key == K_0:\n self._update_input('0')\n elif event.key == K_1:\n self._update_input('1')\n elif event.key == K_2:\n self._update_input('2')\n elif event.key == K_3:\n self._update_input('3')\n elif event.key == K_4:\n self._update_input('4')\n elif event.key == K_5:\n self._update_input('5')\n elif event.key == K_6:\n self._update_input('6')\n elif event.key == K_7:\n self._update_input('7')\n elif event.key == K_8:\n self._update_input('8')\n elif event.key == K_9:\n self._update_input('9')\n elif event.key == K_BACKSPACE:\n self._update_input('BACKSPACE')", "def on_key_down(self, keyboard, keycode, text, modifiers):\n Logger.debug('KeyDown Event: Keycode[1] is \"{}\"'.format(keycode[1]))\n self.keysPressed.add(keycode[1])", "def keyevent(self, keyname):\n self.adb.key_events(keyname)", "def on_keydown(self, keys, game) -> None:\n pass", "def keyPressEvent(self, event):\n self.Serial.send_keystroke(event.text())", "def ev_keydown(self, event: KeyDown) -> None:", "def key_handler(self):\n \n self.pressed = waitKey(1) & 255 #wait for keypress for 10 ms\n if self.pressed == 27: #exit program on 'esc'\n print \"exiting...\"\n self.camera.cam.release()\n exit()\n \n for key in self.key_controls.keys():\n if chr(self.pressed) == key:\n self.key_controls[key]()", "def on_key(self, _win, key, _scancode, action, _mods):\n if action == glfw.PRESS or action == glfw.REPEAT:\n if key == glfw.KEY_ESCAPE or key == glfw.KEY_Q:\n glfw.set_window_should_close(self.win, True)\n if key == glfw.KEY_W:\n GL.glPolygonMode(GL.GL_FRONT_AND_BACK, next(self.fill_modes))\n\n self.key_handler(key)", "def keypress(self, key, state=None):\n\n\t\tself._interface.keypress(key, state)", "def _keyboard_input(self, pressedKey=None):\n\n if msvcrt.kbhit():\n if pressedKey == None:\n pressedKey = msvcrt.getch()\n if pressedKey == b'x' or pressedKey == b'X':\n self._goodbye()\n if pressedKey == b'c' or pressedKey == b'C':\n self.cameraOutput = not self.cameraOutput", "def keyevent(keyname, **kwargs):\n G.DEVICE.keyevent(keyname, **kwargs)\n delay_after_operation()", "def on_key_press(self, key: int, modifiers: int):\r\n self.held_keys.add(key)\r\n\r\n if key == arcade.key.SPACE:\r\n pass", "def key_handler(self, event):\n if event.type == pygame.KEYUP: \n self.done = True", "def _on_keyboard_down(self, keyboard, keycode, char, modifiers):\n\n print(f\"Keystroke: char={char}, code={keycode}, mods={modifiers}\")\n if keycode[0] == 27: # use the Escape key to toggle 
modes.\n self.toggle_speak_mode()\n elif self._speakmode == 'SAY_LETTERS':\n self.say_letter(keyboard, keycode, char, modifiers)\n else:\n self.say_word(keyboard, keycode, char, modifiers)\n return True", "def keypress(cls, _, key):\n return key", "def keypress(self, key): # pragma: no cover\n if key == \"s\":\n self.screenshot()\n\n elif key == \"q\" or key == \"Esc\":\n self.close()\n\n elif key == \"c\":\n self._print_camera()", "def on_key(self, _win, key, _scancode, action, _mods):\n if action == glfw.PRESS or action == glfw.REPEAT:\n if key == glfw.KEY_ESCAPE or key == glfw.KEY_Q:\n glfw.set_window_should_close(self.win, True)\n if key == glfw.KEY_W:\n GL.glPolygonMode(GL.GL_FRONT_AND_BACK, next(self.fill_modes))\n if key == glfw.KEY_R:\n os.system(\"pkill aplay\")\n os.system(\"aplay T-Rex.wav &\")\n glfw.set_time(0)\n if key == glfw.KEY_N:\n self.normal_mapping = 1 - self.normal_mapping", "def keypress(self, event):\n events = {\n '1': lambda: self.slot.set(1),\n '2': lambda: self.slot.set(2),\n '6': lambda: self.digits.set(6),\n '8': lambda: self.digits.set(8),\n }\n try:\n events[event.keysym]()\n except KeyError:\n pass\n if event.keysym in ('1', '2', 'Return', 'Enter'):\n self.get_totp()\n self.root.wm_withdraw()", "def keyevent(self, keycode: Union[str, int]) -> None:\n self.shell(['input', 'keyevent', str(keycode)])", "def keyboard_changed( key, pressed ):\n\tglobal organ\n\t# Key from 0 to 7 are for corresponding notes in NOTES\n\tif 0<= key < len(KEYS):\n\t\t# Transform key index into Note letter\n\t\tnote = KEYS[key]\n\t\tif pressed:\n\t\t\torgan.play_note(note)\n\t\telse:\n\t\t\torgan.clear_note(note)", "def on_press(key):\n global key_pressed\n try:\n if key == keyboard.Key.enter:\n key_pressed = True\n # Stop listener\n return False\n except AttributeError:\n print('Unknown key {0} pressed'.format(key))", "def _on_keyboard(self, instance, key, scancode, codepoint, modifiers, *args):\r\n # print(\"Keyboard pressed! 
{}, {}, {}, {}\".format(key, scancode, codepoint, modifiers))\r\n if codepoint == 's' and 'ctrl' in modifiers:\r\n toast('Search by Name, Ingredient, or Tag', 3)\r\n self.search_focus = True", "def on_press(self, keyname):\n if self.keydown:\n return\n try:\n self.keydown = True\n keyname = str(keyname).strip('\\'')\n log.info('KEY PRESS ' + keyname)\n if keyname == 'Key.esc':\n self.toggle_tracking(False)\n # self.tracking = False\n self.drone.land()\n self.drone.quit()\n\n \n cv2.destroyAllWindows() \n os._exit(0)\n \n if keyname in self.controls_keypress:\n self.controls_keypress[keyname]()\n except AttributeError:\n log.debug(f'special key {keyname} pressed')", "def key_release_event(self, event):\n pass", "def key_press(self):\n self.screen.nodelay(True)\n return self.screen.getch()", "def on_key_press(self, key, modifiers):\n\n if key == arcade.key.UP or key == arcade.key.W:\n self.up_pressed = True\n elif key == arcade.key.DOWN or key == arcade.key.S:\n self.down_pressed = True\n elif key == arcade.key.LEFT or key == arcade.key.A:\n self.left_pressed = True\n elif key == arcade.key.RIGHT or key == arcade.key.D:\n self.right_pressed = True\n\n if key == arcade.key.Q:\n self.shoot_pressed = True\n\n if key == arcade.key.PLUS:\n self.camera.zoom(0.01)\n elif key == arcade.key.MINUS:\n self.camera.zoom(-0.01)\n\n self.process_keychange()", "def handle_keydown(self, key, string):\r\n return app.App.handle_keydown(self, key, string)", "def on_press_show_key(key):\n print(f\"{key} pressed\")", "def on_key(self, event: events.Key) -> None:\n\n def press(button_id: str) -> None:\n \"\"\"Press a button, should it exist.\"\"\"\n try:\n self.query_one(f\"#{button_id}\", Button).press()\n except NoMatches:\n pass\n\n key = event.key\n if key.isdecimal():\n press(f\"number-{key}\")\n elif key == \"c\":\n press(\"c\")\n press(\"ac\")\n else:\n button_id = self.NAME_MAP.get(key)\n if button_id is not None:\n press(self.NAME_MAP.get(key, key))", "def keypress(key):\n k = PyKeyboard()\n if key == 'enter':\n key = k.return_key\n k.tap_key(key)", "def onkey(self, fun, key):\n if fun is None:\n if key in self._keys:\n self._keys.remove(key)\n elif key not in self._keys:\n self._keys.append(key)\n self._onkeyrelease(fun, key)", "def on_key_press(symbol, modifiers):\n if symbol == key.SPACE:\n world.next_step()", "def on_key_press(self, key, modifiers):\n if key == arcade.key.UP or key == arcade.key.W:\n self.up_pressed = True\n elif key == arcade.key.DOWN or key == arcade.key.S:\n self.down_pressed = True\n elif key == arcade.key.LEFT or key == arcade.key.A:\n self.left_pressed = True\n elif key == arcade.key.RIGHT or key == arcade.key.D:\n self.right_pressed = True\n elif key == arcade.key.SPACE:\n self.pc.punching = True\n\n self.process_keychange()", "def on_key(self, _window, key, _scancode, action, _mods):\n is_press = action == glfw.PRESS or action == glfw.REPEAT\n if is_press and (key == glfw.KEY_ESCAPE or key == glfw.KEY_Q):\n glfw.set_window_should_close(self.window, True)\n\n if action != glfw.REPEAT:\n self.key_handler(key, is_press)", "def handle_keyboard_input(self):\n keys = pg.key.get_pressed()\n\n if (keys[K_UP]):\n self.grid.change_direction(Direction.up)\n if (keys[K_DOWN]):\n self.grid.change_direction(Direction.down)\n if (keys[K_LEFT]):\n self.grid.change_direction(Direction.left)\n if (keys[K_RIGHT]):\n self.grid.change_direction(Direction.right)\n if (keys[K_SPACE]):\n self.grid.snake.grow()\n if (keys[K_RIGHTBRACKET]):\n self.actions_per_second += 1\n if (keys[K_LEFTBRACKET]):\n 
self.actions_per_second -= 1\n if (keys[K_t]):\n self.is_training = True\n print(\"========================================================================\")\n print(\"Training: ON\")\n print(\"========================================================================\")\n if (keys[K_s]):\n self.is_training = False\n print(\"========================================================================\")\n print(\"Training: OFF\")\n print(\"========================================================================\")", "def HandleKeyboardInput(self):\n key = yg.getKeyPress()\n if key == \"Return\":\n self.buttons[len(self.buttons) - 1].Click()", "def on_key_press(self, key, modifiers):\n\n if key == arcade.key.UP:\n self.up_pressed = True\n elif key == arcade.key.DOWN:\n self.down_pressed = True\n elif key == arcade.key.LEFT:\n self.left_pressed = True\n elif key == arcade.key.RIGHT:\n self.right_pressed = True", "def _on_pyglet_keypress(self, symbol, modifiers, emulated=False,\n isPress=True):\n key_time = clock()\n if emulated:\n this_key = str(symbol)\n else:\n from pyglet.window import key\n this_key = key.symbol_string(symbol).lower()\n this_key = this_key.lstrip('_').lstrip('NUM_')\n press_or_release = {True: 'press', False: 'release'}[isPress]\n self._keyboard_buffer.append((this_key, key_time, press_or_release))", "def keypress_signal_from_behaviors_coding_map(self, event):\n self.keyPressEvent(event)", "def input_key_event(self, key, custom_key=None):\n\n key_event = INPUT_ACTION_SWITCHER.get(key)\n if key_event == \"-1\":\n key_event = custom_key\n self.android_device_driver.adb.exec_adb_cmd(\"shell input keyevent \" +\n key_event).wait()", "def on_key_press(self, key, callback):\n self._key_press_mappings.setdefault(key, []).append(callback)", "def _on_key_press(self, key):\n if key is self.TRIGGER_KEY and not self.do_record:\n print(\"Start Recording...\")\n self.do_record = True", "def slot_keypress(self, gox, (key)):\r\n pass", "def ev_keyup(self, event: KeyUp) -> None:", "def on_press(self, pressed_key):\n if pressed_key is not None:\n if isinstance(pressed_key, pynput.keyboard.KeyCode) and pressed_key.char is not None:\n pressed_key = pressed_key.char.lower()\n elif isinstance(pressed_key, pynput.keyboard.Key):\n pressed_key = pressed_key.name\n self.keys_set.add(pressed_key)", "def handle_keys(self):\n c = self.scr.getch() # Get a keystroke\n if c == curses.KEY_RESIZE:\n self.resize()\n return\n if 0 < c < 256:\n c = chr(c)\n # Digits are commands without a modifier\n try:\n found_digit = c.isdigit()\n except AttributeError:\n # Since .isdigit() doesn't exist if c > 256, we need to catch the\n # error for those keys.\n found_digit = False\n if found_digit and (len(self.modifier) > 0 or c not in self.keys):\n self.handle_modifier(c)\n elif c in self.keys:\n self.keys[c]()\n else:\n self.modifier = str()", "def on_key_press(self, key):\n if key == 'esc':\n self.backtrack()\n elif key in ['f1', '?']:\n self.open(HelpPane(self._get_current_pane()))", "def on_key(window, key, scancode, action, mods):\n if action != glfw.PRESS:\n return\n \n global controller\n\n if key == glfw.KEY_SPACE:\n controller.fillPolygon = not controller.fillPolygon\n\n elif key == glfw.KEY_ESCAPE:\n glfw.set_window_should_close(window, True)\n\n # Si detecta la tecla [Q] cambia el estado del efecto 1 : zoom\n elif key == glfw.KEY_Z:\n controller.effect1 = not controller.effect1\n\n # Si detecta la tecla [W] cambia el estado del efecto 2 : corte\n elif key == glfw.KEY_C:\n controller.effect2 = not 
controller.effect2\n\n else:\n print('Unknown key')", "def on_key_release(self, key, modifiers):\n player_controller.input_release(key, self.player)", "def on_key_press(self, event):\n # F2 for starting new game\n if event.key == 'f2':\n self.draw_minefield()", "def on_key_press(self, key, modifiers):\n KeyboardController.lastKey = key;\n KeyboardController.keys.add(key);\n if key == arcade.key.ESCAPE:\n # User hits f. Flip between full and not full screen.\n self.set_fullscreen(not self.fullscreen)\n\n # Get the window coordinates. Match viewport to window coordinates\n # so there is a one-to-one mapping.\n width, height = self.get_size()\n self.set_viewport(0, width, 0, height)", "def key_press_event(self, widget, event):\n # get keyname or keycode and translate to ginga standard\n # keyname =\n # keycode =\n keyname = '' # self.transkey(keyname, keycode)\n self.logger.debug(\"key press event, key=%s\" % (keyname))\n return self.make_ui_callback('key-press', keyname)", "def handle_movement_keydown(self, key):\n try:\n log.debug(f'pressed: {key}')\n if key == pygame.K_LEFT:\n self.walk_left()\n elif key == pygame.K_RIGHT:\n self.walk_right()\n elif key == pygame.K_DOWN:\n pass\n elif key == pygame.K_UP:\n pass\n elif key == pygame.K_SPACE:\n self.jump()\n self.keys_down[key] = True\n except AttributeError:\n log.info(\"you didn't pass a keyboard event!!\")", "def __handleKeyDown(self, key):\n self._keysPressed += 1\n if self._keyDown is None and self._keysPressed == 1:\n assert(self.notify.debug(\"Key Down for Pattern: \" + key))\n self.__updateElapsedTime()\n # Inform that a key has been pressed\n messenger.send(KeyCodes.KEY_DOWN_EVENT, [self._keyMap[key], self._keyCodeCount])\n \n self._keyCode += self._keyMap[key]\n self._keyCodeCount += 1\n self._keyDown = key\n self.__checkForPattern()\n else:\n messenger.send(KeyCodes.KEY_DOWN_EVENT, [-1, -1])", "def keyboard(self, *args):\n return _ida_hexrays.Hexrays_Hooks_keyboard(self, *args)", "def getKeyboardInput(self, keysPressed):\n self.keyboardInput = keysPressed", "def keyboard_on_key_up(self, window, keycode):\n if 'shift' in keycode[1]:\n self.shift_down = False", "def on_key_press(self, key, modifiers):\n if self.player_enabled:\n super().on_key_press(key, modifiers)", "def _onKeyPress(self, widget, event):\n\t\t#print event.hardware_keycode\n\t\tif event.hardware_keycode in [102,100,98,104] and self.fullscreenToggle:\n\t\t\t# Binding: arrow keys: move image (Fs)\n\t\t\t# Binding: arrow keys+Shift: move image faster (Fs)\n\t\t\tif event.state & gtk.gdk.SHIFT_MASK:\n\t\t\t\tamount = 60\n\t\t\telse:\n\t\t\t\tamount = 15\n\t\t\t\n\t\t\tif event.hardware_keycode == 102:\n\t\t\t\tself.moveImage( self.imgPos[0] + amount, self.imgPos[1] )\n\t\t\telif event.hardware_keycode == 100:\n\t\t\t\tself.moveImage( self.imgPos[0] - amount, self.imgPos[1] )\n\t\t\telif event.hardware_keycode == 98:\n\t\t\t\tself.moveImage( self.imgPos[0], self.imgPos[1] - amount )\n\t\t\telif event.hardware_keycode == 104:\n\t\t\t\tself.moveImage( self.imgPos[0], self.imgPos[1] + amount )\n\t\telif event.hardware_keycode == 36:\n\t\t\t# Binding: Return: Next image\n\t\t\tself.next()\n\t\telif event.hardware_keycode == 99:\n\t\t\t# Binding: PgUp: +10 images\n\t\t\tself.position = (self.position + 9) % len(self.fileList)\n\t\t\tself.next()\n\t\telif event.hardware_keycode == 105:\n\t\t\t# Binding: PgDwn: -10 images\n\t\t\tself.position = (self.position - 9) % len(self.fileList)\n\t\t\tself.previous()\n\t\telif event.hardware_keycode == 22:\n\t\t\t# Binding: Backspace: 
Previous image\n\t\t\tself.previous()\n\t\telif event.hardware_keycode == 9:\n\t\t\t# Binding: Escape: Quit\n\t\t\tself.exit()\n\t\telif event.hardware_keycode == 86:\n\t\t\t# numpad +\n\t\t\tself.zoom( 0.1 );\n\t\telif event.hardware_keycode == 82:\n\t\t\t# numpad -\n\t\t\tself.zoom( -0.1 );\n\t\telif event.keyval < 256:\n\t\t\tif chr(event.keyval) == \" \":\n\t\t\t\t# Binding: Space: Next image\n\t\t\t\tself.next()\n\t\t\telif chr(event.keyval) == \"q\":\n\t\t\t\t# Binding: q: quit\n\t\t\t\tself.exit()\n\t\t\telif chr(event.keyval) == \"f\":\n\t\t\t\t# Binding: f: Toggle fullscreen\n\t\t\t\tself.fullscreenToggle = not self.fullscreenToggle\n\t\t\t\tif self.fullscreenToggle:\n\t\t\t\t\t#self.showCursor(False)\n\t\t\t\t\tself.fullscreen()\n\t\t\t\telse:\n\t\t\t\t\tself.showCursor(True)\n\t\t\t\t\tself.unfullscreen()\n\t\t\t\tself.autoScale()\n\t\t\t\tdef reload():\n\t\t\t\t\tself.loadImage()\n\t\t\t\tgobject.timeout_add(5, reload)\n\t\t\telif chr(event.keyval) == \"+\":\n\t\t\t\t# Binding: +: Zoom in\n\t\t\t\tself.zoom( 0.1 )\n\t\t\t\tself.scaleFactor += 0.1\n\t\t\t\tself.display()\n\t\t\t\tself.autoResize()\n\t\t\t\tgobject.timeout_add(10, self.display)\n\t\t\t\tself.setTitle(\"Zoom +\")\n\t\t\telif chr(event.keyval) == \"-\":\n\t\t\t\t# Binding: -: Zoom out\n\t\t\t\tself.zoom( -0.1 )\n\t\t\t\tif self.scaleFactor > 0.1:\n\t\t\t\t\tself.scaleFactor -= 0.1\n\t\t\t\tself.display()\n\t\t\t\tself.autoResize()\n\t\t\t\tgobject.timeout_add(10, self.display)\n\t\t\t\tself.setTitle(\"Zoom -\")\n\t\t\telif chr(event.keyval) == \"r\":\n\t\t\t\t# Binding: r: Reload\n\t\t\t\tself.loadImage()\n\t\t\telif chr(event.keyval) == \"t\":\n\t\t\t\t# Binding: t: Toggle autoscale\n\t\t\t\tself.autoscaleToggle = not self.autoscaleToggle\n\t\t\t\tself.loadImage()\n\t\t\t\tgobject.timeout_add(5, self.display)\n\t\t\t\tif self.autoscaleToggle:\n\t\t\t\t\tself.setTitle(\"Autoscale\")\n\t\t\t\telse:\n\t\t\t\t\tself.setTitle(\"Autoscale disabled\")\n\t\t\telif chr(event.keyval) == \"l\":\n\t\t\t\t# Binding: l: Rotate left\n\t\t\t\tself.rotate(gtk.gdk.PIXBUF_ROTATE_COUNTERCLOCKWISE)\n\t\t\t\tself.display()\n\t\t\t\tself.setTitle(\"Rotate left\")\n\t\t\telif chr(event.keyval) == \"k\":\n\t\t\t\t# Binding: k: Rotate right\n\t\t\t\tself.rotate(gtk.gdk.PIXBUF_ROTATE_CLOCKWISE)\n\t\t\t\tself.display()\n\t\t\t\tself.setTitle(\"rotate right\")\n\t\t\telif chr(event.keyval) == \"s\":\n\t\t\t\t# Binding: s: Toggle slideshow\n\t\t\t\tif self.slideshowRef:\n\t\t\t\t\tgobject.source_remove(self.slideshowRef)\n\t\t\t\t\tself.slideshowRef = None\n\t\t\t\t\tself.setTitle(\"Slideshow disabled\")\n\t\t\t\telse:\n\t\t\t\t\tself.slideshowRef = gobject.timeout_add(self.slideInterval, self.next)\n\t\t\t\t\tself.setTitle(\"Slideshow\")\n\t\t\telif chr(event.keyval) == \"h\":\n\t\t\t\t# Binding: h: Flip horizontally\n\t\t\t\tself.flip(True)\n\t\t\t\tself.display()\n\t\t\t\tself.setTitle(\"Flip horizontal\")\n\t\t\telif chr(event.keyval) == \"v\":\n\t\t\t\t# Binding: v: Flip vertically\n\t\t\t\tself.flip(False)\n\t\t\t\tself.display()\n\t\t\t\tself.setTitle(\"Flip vertical\")\n\t\t\telif chr(event.keyval) == \"a\":\n\t\t\t\t# Binding: a: Copy image to .qiv-select\n\t\t\t\tif not os.path.isdir(\".qiv-select\"):\n\t\t\t\t\tos.mkdir(\".qiv-select\")\n\t\t\t\tshutil.copyfile(self.fileList[self.position], \n\t\t\t\t\tos.path.join(\".qiv-select/\", os.path.basename(self.fileList[self.position])))\n\t\t\t\tself.setTitle(\"Copy saved\")\n\t\t\telif chr(event.keyval) == \"i\":\n\t\t\t\t# Binding: i: show infoscreen\n\t\t\t\tself.infoVisible = not 
self.infoVisible\n\t\t\t\tif self.infoVisible:\n\t\t\t\t\tself.infoLabelBox.show()\n\t\t\t\telse:\n\t\t\t\t\tself.infoLabelBox.hide()", "def key_up(key):\n\n vk = key\n # XXX exception if >= 256\n _key_up(vk)", "def keyPressEvent(self, event):\n if event.key() not in self.inputs.keys():\n self.inputs[event.key()] = [True, 0]\n # end if not in dict, add key to dict\n self.inputs[event.key()][0] = True\n\n for game_object in self.game_engine.game_objects:\n game_object.key_press_event(event)\n # end for", "def keyPressed():\n global PLAY\n if (key == ' '):\n PLAY = not PLAY\n if (key == 'r'):\n init()", "def on_key_press(self, key_pressed: int, _: int) -> None:\n if key_pressed in (key.UP, key.W):\n if self.physics_engine.can_jump():\n self.change_y = self.jump_speed\n elif key_pressed in (key.LEFT, key.A):\n self.change_x = -self.movement_speed\n self.direction = Direction.LEFT\n self.last_faced_dir = \"left\"\n self.texture = self.textures[Direction.LEFT.value]\n elif key_pressed in (key.RIGHT, key.D):\n self.change_x = self.movement_speed\n self.direction = Direction.RIGHT\n self.last_faced_dir = \"right\"\n self.texture = self.textures[Direction.RIGHT.value]", "def on_key_release(self, symbol, modifiers):\n self.gamestatemanager.peek().on_key_release(symbol, modifiers, self.config_data[\"controls\"])", "def on_key_down(self, keycode, keyvalue, event):\n if self.__click == True and (len(gtk.gdk.keyval_name(event.keyval)) < 2 or gtk.gdk.keyval_name(event.keyval) == \"space\"):\n if gtk.gdk.keyval_name(event.keyval) == \"space\":\n self.__text = self.__text + \" \";\n else:\n self.__text = self.__text + gtk.gdk.keyval_name(event.keyval);\n if gtk.gdk.keyval_name(event.keyval) == \"BackSpace\" and self.__text:\n self.__text = self.__text[:-1];\n if gtk.gdk.keyval_name(event.keyval) == \"Return\" or self.__click == False and self.__text:\n self.addNew();\n\t\t\t#screenlets.show_message(self, \"Committed\");", "def press_key(self, event):\n if self.active:\n keycode = self.mapping[event.pin_num]\n while self.busy:\n sleep(0.01)\n self.busy = True\n self.send_key(keycode)\n self.busy = False", "def on_press(self, key):\n try:\n if 'up' == key.name:\n if self.index > 0:\n self.index -= 1\n else:\n self.index = self.count\n elif 'down' == key.name:\n if self.index < self.count:\n self.index += 1\n else:\n self.index = 0\n elif key == Key.enter:\n # Stop listener\n self.flag += 1\n return False\n except:\n return False", "def SetKeyEvent(self, event):\r\n\r\n self._evtKey = event", "def key_press(keys):\n return lambda e: e.key if e.type == pygame.KEYDOWN \\\n and e.key in keys else EventConsumerInfo.DONT_CARE", "def on_key_press(event):\n if event.key == 'shift':\n self.shift_is_held = True", "def on_key_press(self, event):\n if self.active:\n key = event.key or ''\n key = key.replace('ctrl', 'control')\n if key == self._state_modifier_keys['clear']:\n self.clear()\n return\n for (state, modifier) in self._state_modifier_keys.items():\n if modifier in key.split('+'):\n # 'rotate' is changing _state on press and is not removed\n # from _state when releasing\n if state == 'rotate':\n if state in self._state:\n self._state.discard(state)\n else:\n self._state.add(state)\n else:\n self._state.add(state)\n self._on_key_press(event)", "def signal_from_subjects_pad(self, event):\n self.keyPressEvent(event)", "def handle_keyboard_data(data):\n pass", "def hit_enter():\n keyboard.press_and_release('Enter')", "def keyboard_on_key_down(self, window, keycode, text, modifiers):\n if 'shift' in keycode[1]:\n 
self.shift_down = True", "async def sendKeyPress(self, key):\n key = str(key)\n await self.director.sendPostRequest(\n \"/api/v1/items/{}/commands\".format(self.item_id),\n \"KEY_PRESS\",\n {\"KeyName\": key},\n )", "def key_hooks(self, key):\n if key == pygame.K_LEFT:\n self.bias[0] -= 10\n elif key == pygame.K_RIGHT:\n self.bias[0] += 10\n elif key == pygame.K_DOWN:\n self.bias[1] += 10\n elif key == pygame.K_UP:\n self.bias[1] -= 10\n elif key == pygame.K_EQUALS:\n self.scale[0] += 5\n self.scale[1] += 5\n self.scale[2] += 5\n elif key == pygame.K_MINUS:\n self.scale[0] -= 5\n self.scale[1] -= 5\n self.scale[2] -= 5\n elif key == pygame.K_q:\n self.view_angle[0] += .1\n elif key == pygame.K_w:\n self.view_angle[0] -= .1\n elif key == pygame.K_a:\n self.view_angle[1] += .1\n elif key == pygame.K_s:\n self.view_angle[1] -= .1\n elif key == pygame.K_z:\n self.view_angle[2] += .1\n elif key == pygame.K_x:\n self.view_angle[2] -= .1", "def on_key_press(self, symbol, modifiers):\n if symbol == key.ESCAPE:\n self.set_exclusive_mouse(False)\n else:\n self.gamestatemanager.peek().on_key_press(symbol, modifiers, self.config_data[\"controls\"])", "def notify(self, sender, key, key2=b'\\x00'):\r\n\r\n EventListener.notify(self, sender, KeyPressEventArgs(key, key2))", "def on_key_press(self, key, key_modifiers):\n if key == arcade.key.LEFT or key == arcade.key.DOWN:\n self.holding_left = True\n\n if key == arcade.key.RIGHT or key == arcade.key.UP:\n self.holding_right = True", "def handle_event(self, event, window):\n raise NotImplementedError('handle_key')", "def presskey(self, key):\n \"\"\"Method to press any key\n Need to add further code for other keys based on requirements\"\"\"\n action = ActionChains(self.driver)\n action.send_keys(key)\n action.perform()", "def on_key_press(self, key, modifiers):\n if key == arcade.key.UP or key == arcade.key.W:\n self.direction = MoveEnum.UP\n elif key == arcade.key.DOWN or key == arcade.key.S:\n self.direction = MoveEnum.DOWN\n elif key == arcade.key.LEFT or key == arcade.key.A:\n self.direction = MoveEnum.LEFT\n elif key == arcade.key.RIGHT or key == arcade.key.D:\n self.direction = MoveEnum.RIGHT\n elif key == arcade.key.SPACE:\n self.shoot()", "def on_key_press(self, key: str):\n if key == \"down\":\n self.selection_index += 1\n self.selection_index %= len(self.OPTIONS)\n self.draw()\n elif key == \"up\":\n self.selection_index -= 1\n self.selection_index %= len(self.OPTIONS)\n self.draw()\n elif key == \"enter\":\n self.OPTIONS[self.selection_index][1]()\n self.draw()", "def keyReleaseEvent(self, event):\n self.game_engine.input_manager.keyReleaseEvent(event)", "def key_press_callback(data):\n global D\n message = data.data # that's the string\n D.last_keypress = message\n # we'll handle stuff here...\n k = D.last_keypress\n\n if k in ' ': \n D.robot_publisher.publish( \"toggle commands\" ) # Wow!\n if k in 'W': # 'W' goes to the waiting state\n D.robot_publisher.publish( \"D.tank(0,0)\" ) # Yay, Python!\n D.STATE = \"WAITING_TO_START\" # back to waiting to start", "def key(event):\n nonlocal button_save_string\n self.is_binding = False\n try:\n if event.keysym == 'space':\n initialise_control_button(button, button_save_string)\n return\n button['text'] = str(event.keysym)\n set_command_for(button_save_string, event.keysym)\n\n except Exception:\n if event.char == 'space':\n initialise_control_button(button, button_save_string)\n return\n button['text'] = str(event.char)\n set_command_for(button_save_string, event)\n\n unbind_keys(button)", "def 
__acceptKeyDown(self, key):\n self.accept(key, self.__handleKeyDown, [key])" ]
[ "0.84874815", "0.8405655", "0.82012695", "0.81760395", "0.80712044", "0.799939", "0.79385316", "0.7934922", "0.7907901", "0.79031485", "0.78694284", "0.77583754", "0.7742568", "0.76565206", "0.7616181", "0.7597726", "0.75769305", "0.7515888", "0.73805565", "0.73772943", "0.7335729", "0.7262355", "0.72391057", "0.7233868", "0.7230385", "0.7229613", "0.72188556", "0.72103745", "0.7199378", "0.7198658", "0.71978843", "0.7188627", "0.718192", "0.7160231", "0.7157947", "0.7143948", "0.71415436", "0.7138843", "0.7135565", "0.71303993", "0.7089814", "0.7071247", "0.70606965", "0.7056595", "0.70443016", "0.7042936", "0.7039367", "0.7028357", "0.70164806", "0.7011686", "0.6991126", "0.69804686", "0.6971055", "0.6959817", "0.6954503", "0.69473857", "0.6946486", "0.6933573", "0.6906418", "0.6902636", "0.6892438", "0.68922496", "0.6891822", "0.68911844", "0.68787295", "0.68625915", "0.6859", "0.68479544", "0.6838417", "0.68321794", "0.68203664", "0.6819467", "0.6803858", "0.6773763", "0.67708373", "0.67685705", "0.67673093", "0.6766309", "0.67607903", "0.6753852", "0.6746521", "0.672933", "0.6726354", "0.67116153", "0.6694529", "0.6660718", "0.66574955", "0.6656741", "0.6649085", "0.66479504", "0.6644328", "0.6633746", "0.6632993", "0.66290057", "0.6628342", "0.66274834", "0.66224605", "0.6616046", "0.66134435", "0.66066974" ]
0.69576883
54
Called whenever the user lets off a previously pressed key.
def on_key_release(self, key, modifiers):
    try:
        game_key = KEY_MAP[key]
    except KeyError:
        return

    self.game.on_key_release(game_key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _on_key_release(self, event):", "def off(key):\n # print(\"{0} released\".format(key), time.perf_counter())\n\n global keys, esc_count\n\n # caps, shift, etc. aren't automatically registered as strings\n if type(key) == Key:\n keys[esc_count].append((str(key), time.perf_counter(), \"released\"))\n else:\n keys[esc_count].append((key, time.perf_counter(), \"released\"))", "def unlock(self):\n self.shell(\"input keyevent MENU\")\n self.shell(\"input keyevent BACK\")", "def key_release_event(self, event):\n pass", "def on_key_release(event):\n if event.key == 'shift':\n self.shift_is_held = False", "def keyboard_on_key_up(self, window, keycode):\n if 'shift' in keycode[1]:\n self.shift_down = False", "def ev_KEYUP(self, event):", "def on_key_release(self, key, modifiers):\n self.key_pressed = False\n if key == arcade.key.UP or key == arcade.key.DOWN:\n self.player.change_y = 0\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player.change_x = 0\n if self.player.get_last_side() == \"left\":\n self.player.set_action(\"left_idle\")\n else:\n self.player.set_action(\"right_idle\")", "def _on_key_press(self, event):", "def on_key_release(self, symbol, modifiers):\n self.gamestatemanager.peek().on_key_release(symbol, modifiers, self.config_data[\"controls\"])", "def on_key_release(self, key, modifiers):\n pass # stop animation", "def on_key_release(self, key, modifiers):\n\n if key == arcade.key.UP or key == arcade.key.W:\n self.up_pressed = False\n self.jump_needs_reset = False\n elif key == arcade.key.DOWN or key == arcade.key.S:\n self.down_pressed = False\n elif key == arcade.key.LEFT or key == arcade.key.A:\n self.left_pressed = False\n elif key == arcade.key.RIGHT or key == arcade.key.D:\n self.right_pressed = False\n\n if key == arcade.key.Q:\n self.shoot_pressed = False\n\n self.process_keychange()", "def key_handler(self, event):\n if event.type == pygame.KEYUP: \n self.done = True", "def keyReleaseEvent(self, event):\n self.game_engine.input_manager.keyReleaseEvent(event)", "def on_key_release(self, key, modifiers):\n\n if key == arcade.key.UP or key == arcade.key.W:\n self.up_pressed = False\n self.jump_needs_reset = False\n elif key == arcade.key.DOWN or key == arcade.key.S:\n self.down_pressed = False\n elif key == arcade.key.LEFT or key == arcade.key.A:\n self.left_pressed = False\n elif key == arcade.key.RIGHT or key == arcade.key.D:\n self.right_pressed = False\n elif key == arcade.key.SPACE:\n self.pc.punching = False\n\n self.process_keychange()", "def on_key_release(self, key, modifiers):\n\n if key == arcade.key.UP:\n self.up_pressed = False\n elif key == arcade.key.DOWN:\n self.down_pressed = False\n elif key == arcade.key.LEFT:\n self.left_pressed = False\n elif key == arcade.key.RIGHT:\n self.right_pressed = False", "def on_key_release(self, key, modifiers):\n\n if key == arcade.key.UP:\n self.up_pressed = False\n elif key == arcade.key.DOWN:\n self.down_pressed = False\n elif key == arcade.key.LEFT:\n self.left_pressed = False\n elif key == arcade.key.RIGHT:\n self.right_pressed = False", "def ev_KEYDOWN(self, event):", "def key_down(key):\n vk = key\n # XXX exception if >= 256\n _key_down(vk)", "def OnModeOff(self, event):\n\n\t\t#~ raw_code = event.GetRawKeyCode()\n\t\tmodifiers = event.GetModifiers()\n\n\t\t#~ if raw_code == 75 and modifiers==3:\n\t\t\t#~ self.Close()\n\t\tif modifiers==2:\n\t\t\tself.mode = \"\"\n\t\tprint \"ctrl up....\\n\"", "def on_key_up(self, keyboard, keycode):\n Logger.debug('KeyUp Event: Keycode[1] is 
\"{}\"'.format(keycode[1]))\n self.keysPressed.remove(keycode[1])", "def key_toggle():\n toggle_main_off()\n lcd.message = format_lcd_message(\n TITLE,\n f\"Keys enabled: {keys_enabled}\",\n \"\",\n \"Toggle Back\"\n )\n switchLight.red.on()\n switchLight.blue.on()\n\n switch.green.wait_for_release()\n\n switch.red.when_pressed = toggle_keys\n\n switch.blue.wait_for_press()\n\n # Blue light pressed - reset and drop out of diagnostics mode\n toggle_main_on()\n update_display(last_result)", "def back(self):\n self.input_key_event(InputActions.BACK)", "def on_key_press(self, event):\n if self.active:\n key = event.key or ''\n key = key.replace('ctrl', 'control')\n if key == self._state_modifier_keys['clear']:\n self.clear()\n return\n for (state, modifier) in self._state_modifier_keys.items():\n if modifier in key.split('+'):\n # 'rotate' is changing _state on press and is not removed\n # from _state when releasing\n if state == 'rotate':\n if state in self._state:\n self._state.discard(state)\n else:\n self._state.add(state)\n else:\n self._state.add(state)\n self._on_key_press(event)", "def on_key_event(self, key):\n pass", "def unbind_keys(button):\n button.unbind('<Key>')\n button.unbind('<Left>')\n button.unbind('<Up>')\n button.unbind('<Right>')\n button.unbind('<Down>')", "def handle_keyhold(self, key, string):\r\n return app.App.handle_keyhold(self, key, string)", "def on_key_press(self, symbol, modifiers):\n if symbol == key.ESCAPE:\n self.set_exclusive_mouse(False)\n else:\n self.gamestatemanager.peek().on_key_press(symbol, modifiers, self.config_data[\"controls\"])", "def on_key_release(self, event):\n if self.active:\n key = event.key or ''\n for (state, modifier) in self._state_modifier_keys.items():\n # 'rotate' is changing _state on press and is not removed\n # from _state when releasing\n if modifier in key.split('+') and state != 'rotate':\n self._state.discard(state)\n self._on_key_release(event)", "def _inactive(self):\n if self._new_key_press():\n self._state = STATE_NEWGAME\n self._mssg = None", "def on_key_release(self, key, modifiers):\n\n if key == arcade.key.UP:\n self.up_pressed = False\n elif key == arcade.key.DOWN:\n self.down_pressed = False\n elif key == arcade.key.LEFT:\n self.left_pressed = False\n elif key == arcade.key.RIGHT:\n self.right_pressed = False\n elif key == arcade.key.W:\n self.up_pressed = False\n elif key == arcade.key.S:\n self.down_pressed = False\n elif key == arcade.key.A:\n self.left_pressed = False\n elif key == arcade.key.D:\n self.right_pressed = False", "def keyUp(self):\n if pyxel.btnp(pyxel.KEY_UP):\n self.rotater(-1)", "def on_key_press(event):\n if event.key == 'shift':\n self.shift_is_held = True", "def check_keyDown(event, ai_settings, screen, player, projectiles):\n if event.key == pygame.K_UP:\n player.moving_up = True\n elif event.key == pygame.K_DOWN:\n player.moving_down = True\n elif event.key == pygame.K_SPACE:\n player.moving_right = True\n player.not_moving = False\n elif event.key == pygame.K_v:\n fire_laser(ai_settings, screen, player, projectiles)\n elif event.key == pygame.K_q:\n sys.exit()", "def on_key_release(self, key, modifiers):\n player_controller.input_release(key, self.player)", "def on_key_release(self, key):\n if key == LEFT:\n self.player.change_x = 0\n elif key == RIGHT:\n self.player.change_x = 0\n elif key == UP:\n self.player.change_y = 0\n elif key == DOWN:\n self.player.change_y = 0", "def modifierUp(self, event):\n if event.keysym in ['Shift_L', 'Shift_R', 'Control_L', 'Control_R',\n 'Alt_L', 
'Alt_R']:\n self.kbdModifier[event.keysym] = 0\n # release the grab. Release must be done on button release event\n # this is the Problem. if SHFT is released before button we loose\n # button motion and button release events after that.\n # Seems that a solution to this would require this object to also\n # monitor mouse buttons and release the grab after the last release\n # of either the button or the modifier.\n self.master.grab_release()\n\n #if self.oldgrab:\n # self.oldgrab.grab.set()\n\t self.keybdModifierCallbacksUp[event.keysym].CallCallbacks(event)", "def reset_keys(self):\n self.UP_KEY, self.DOWN_KEY, self.START_KEY, self.BACK_KEY = False, False, False, False", "def reset_keys(self):\n self.UP_KEY, self.DOWN_KEY, self.START_KEY, self.BACK_KEY = False, False, False, False", "def OnKeyDown(self,event):\n\n\t\traw_code = event.GetRawKeyCode()\n\t\tmodifiers = event.GetModifiers()\n\n\t\t#~ if raw_code == 75 and modifiers==3:\n\t\t\t#~ self.Close()\n\t\tif raw_code == 75 and modifiers==2:\n\t\t\tself.debug_out.SetValue(\"\")", "def volume_down(self):\n self.handleCommand(25)", "def __backToIdleWithBackPress(self, long = False):\r\n if long:\r\n self.phone.select.long('KBD_KEY_BACK', doNotReport = True)\r\n else:\r\n self.phone.select('KBD_KEY_BACK', doNotReport = True)", "def key_press_event(self, event):\n pass", "def on_key_release(self, key_released: int, _: int) -> None:\n if key_released in (key.LEFT, key.RIGHT, key.A, key.D):\n self.change_x = 0\n self.direction = None", "def on_up_key(self, event) -> None:\r\n\r\n self.move_view(0, -1)", "def __handleKeyUp(self, key):\n arg = -1\n if self._keysPressed > 0:\n self._keysPressed -= 1\n if self._keyDown == key and self._keysPressed == 0:\n arg = self._keyMap[key]\n \n if self._keysPressed == 0:\n self._keyDown = None\n \n messenger.send(KeyCodes.KEY_UP_EVENT, [arg])", "def on_release(self):\n self.pressed = False", "def on_release(self):\n self.pressed = False", "def handle_single_key(self, event):\n key = event.key\n if key == pygame.K_f:\n self.followmode = not self.followmode\n elif self.followmode and key in (pygame.K_w, pygame.K_UP):\n self.follownum += 1\n elif self.followmode and key in (pygame.K_s, pygame.K_DOWN):\n self.follownum -= 1\n elif key == pygame.K_ESCAPE:\n exit()", "def timerUp(self):\n self.setDown(False)", "def on_key_release(self, key: int, modifiers: int):\r\n if key in self.held_keys:\r\n self.held_keys.remove(key)", "def on_key_release(self, key: int, modifiers: int):\r\n if key in self.held_keys:\r\n self.held_keys.remove(key)", "def keyReleaseEvent(self, event):\n # The autorepeat debounces\n if not event.isAutoRepeat():\n if event.key() == Qt.Key_Up or event.key() == Qt.Key_Down or (\n event.key() == Qt.Key_Left) or event.key() == Qt.Key_Right:\n self.notifyObservers(BehavioralStates.RC, (Qt.Key_Slash, \"0\"))\n # this is so the next time we press w we know it's a new key\n elif event.key() == Qt.Key_W:\n self.notifyObservers(BehavioralStates.RC, (Qt.Key_Q, \"0\"))", "def keyboard_on_key_down(self, window, keycode, text, modifiers):\n if 'shift' in keycode[1]:\n self.shift_down = True", "def on_key_release(self, key, key_modifiers):\n if key == arcade.key.LEFT or key == arcade.key.DOWN:\n self.holding_left = False\n\n if key == arcade.key.RIGHT or key == arcade.key.UP:\n self.holding_right = False", "def debounced_key_release(event):\n # print('Debounced release', repr(event.key))\n key_indicator.set_text('')\n fig.canvas.draw()", "def _check_keyUP_events(self, event):\n if event.key == 
pygame.K_RIGHT:\n self.ship.moving_right = False\n elif event.key == pygame.K_LEFT:\n self.ship.moving_left = False", "def _handle_key_press(self, event: pygame.event.Event) -> None:\n if event.key == K_0:\n self._update_input('0')\n elif event.key == K_1:\n self._update_input('1')\n elif event.key == K_2:\n self._update_input('2')\n elif event.key == K_3:\n self._update_input('3')\n elif event.key == K_4:\n self._update_input('4')\n elif event.key == K_5:\n self._update_input('5')\n elif event.key == K_6:\n self._update_input('6')\n elif event.key == K_7:\n self._update_input('7')\n elif event.key == K_8:\n self._update_input('8')\n elif event.key == K_9:\n self._update_input('9')\n elif event.key == K_BACKSPACE:\n self._update_input('BACKSPACE')", "def handle_pygame_event(self, event):\n if event.type != KEYDOWN:\n # nothing to do\n return\n if event.key == pygame.K_LEFT:\n self.model.change_paddle_velocity(-1)\n elif event.key == pygame.K_RIGHT:\n self.model.change_paddle_velocity(1)", "def handleKeyboardInterupt():\n System.stopExecution(TERMINATED_BY_USER)", "def key_down(event, ai, var, screen, ship, shots, enemies, charges, shields, hub):\r\n\tif event.key == pygame.K_UP:\r\n\t\tship.move_up = 1\r\n\telif event.key == pygame.K_DOWN:\r\n\t\tship.move_down = 1\r\n\telif event.key == pygame.K_SPACE:\r\n\t\tshoot_bullet(ai, screen, ship, shots, enemies)\r\n\t\tbegin_charge(ai, var, screen, ship, charges)\r\n\telif event.key == pygame.K_RSHIFT or event.key == pygame.K_LSHIFT:\r\n\t\tcall_shield(ai, var, screen, ship, shields, hub)\r\n\telif event.key == pygame.K_q:\r\n\t\tsys.exit()\r\n\t#elif event.key == pygame.K_p:\r\n\t#\thub.pause = 1\r\n\telif event.key == pygame.K_z:\r\n\t\thub.za_wurado(ai)", "def keyboard_end_game_control(self, app):\n mx, my = pg.mouse.get_pos()\n click = False\n\n game_view = self.get_view.game_view\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit()\n sys.exit()\n\n if event.type == pg.MOUSEBUTTONDOWN:\n if event.button == 1:\n click = True\n\n if game_view.back_menu_button.collidepoint((mx, my)):\n if click:\n app.end_game_running = False", "def keyReleaseEvent(self, event):\n if event.key() not in self.inputs.keys():\n self.inputs[event.key()] = [False, 0]\n # end if not in dict, add key to dict\n self.inputs[event.key()][0] = False\n\n for game_object in self.game_engine.game_objects:\n game_object.key_release_event(event)\n # end for", "def switch_off_key(self, key):\n if key not in self.switched_off_keys:\n self._switched_off_keys.append(key)\n self._config[\"# \"+key] = self._config.pop(key)", "def handle_keyrelease_event(event, labels):\n\tglobal current_user\n\tglobal current_mode\n\t\n\t(instruction_label, response_label, congrats_label) = labels\n\n\tif current_mode == \"number\":\n\t\tnum_char = str(event.char)\n\t\tif num_char in ['1','2','3','4','5','6','7']:\n\t\t\tpush_update(current_user, int(num_char))\n\t\t\tcongrats_label.temp_update(random.choice(messages), 1500)\n\t\t\tcurrent_mode = \"user\"\n\t\t\tinstruction_label.update(\"Please enter user character...\")", "def modifierDown(self, event):\n if event.keysym in ['Shift_L', 'Shift_R', 'Control_L', 'Control_R',\n 'Alt_L', 'Alt_R']:\n self.kbdModifier[event.keysym] = 1\n # grab all event to make sure get the key release event even\n # if the mouse is outside the application\n\n # we have problems with this because when we release we loose\n # the grab. 
As a consequence, if SHIFT+buttton1 was used to start\n # a rubberband and SHIFT is released BEFORE the button,\n # We loose motion and release event and the line stops moving\n # and is never deleted :(\n\n # this was an attempt to have tha canvas set the grab. But then\n # modifier event are not caught !\n \n #self.oldgrab = self.master.grab_current()\n #print 'setting global grab', self.oldgrab\n #self.master.grab_set_global()\n\t self.keybdModifierCallbacksDown[event.keysym].CallCallbacks(event)", "def __followerExit(self):\r\n self.phone.select('KBD_KEY_KEYLOCK_TOGGLE', doNotReport=True)\r\n self.phone.delay(5000, False)\r\n self.phone.select('KBD_KEY_KEYLOCK_TOGGLE', doNotReport=True)\r\n self.phone.delay(1000, False)\r\n self.__backToIdleWithSwipe() # open screen lock\r\n self.phone.delay(1500, False)\r\n self.__backToIdleWithSwipe() # close application with swipe\r\n self.phone.delay(1000, False)\r\n\r\n self.__backToIdleWithBackPress(True) # if still in some application, this could close it\r\n self.phone.delay(500, False)\r\n self.__backToIdleWithBackPress() # return to idle screen top with 2 back press\r\n self.phone.delay(500, False)\r\n self.__backToIdleWithBackPress()\r\n self.phone.delay(2000, False)", "def on_press(key):\n try:\n # gets pressed key char value and searches it from dict with get method.\n mapped_key = key_mappings.get(key.char) # gets value and type tuple or None\n if mapped_key:\n module.pressed_key = mapped_key\n except AttributeError:\n traceback.print_exc()\n except KeyboardInterrupt:\n print(f\"\\n{module.current_time()} Application stopped\")", "def on_key_press(self, pressed, modifiers):\n if pressed == key.ESCAPE: self.save_world(); self.close(); log.INFO(\"MineGlet was closed!\")\n elif pressed == key.E: self.mouse_lock = not self.mouse_lock", "def home(self):\n self.input_key_event(InputActions.HOME)", "def on_key_release(self, key, modifiers):\r\n if key == arcade.key.UP or key == arcade.key.DOWN:\r\n self.player.change_y = 0\r\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\r\n self.player.change_x = 0", "def on_key_release(self, key, modifiers):\r\n if key == arcade.key.UP or key == arcade.key.DOWN:\r\n self.player.change_y = 0\r\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\r\n self.player.change_x = 0", "def down(self, *args):\n self.cur_win().down()", "def on_key_press(self, key):\n if key == 'esc':\n self.backtrack()\n elif key in ['f1', '?']:\n self.open(HelpPane(self._get_current_pane()))", "def on_key(self, _win, key, _scancode, action, _mods):\n if action == glfw.PRESS or action == glfw.REPEAT:\n if key == glfw.KEY_ESCAPE or key == glfw.KEY_Q:\n glfw.set_window_should_close(self.win, True)\n if key == glfw.KEY_W:\n GL.glPolygonMode(GL.GL_FRONT_AND_BACK, next(self.fill_modes))\n\n self.key_handler(key)", "def OnStopPress(self, event):\n\t\tself.onOffText.SetLabel('Off')\n\t\tself.isBaselineRunning = False\n\t\tself.hasBaselineEnded = True", "def _on_keyboard_down(self, keyboard, keycode, char, modifiers):\n\n print(f\"Keystroke: char={char}, code={keycode}, mods={modifiers}\")\n if keycode[0] == 27: # use the Escape key to toggle modes.\n self.toggle_speak_mode()\n elif self._speakmode == 'SAY_LETTERS':\n self.say_letter(keyboard, keycode, char, modifiers)\n else:\n self.say_word(keyboard, keycode, char, modifiers)\n return True", "def on_key_release(self, key, modifiers):\n if self.current_state == GAME_RUNNING:\n if key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player_sprite.change_x = 0\n elif key == 
arcade.key.UP or key == arcade.key.DOWN:\n self.player_sprite.change_y = 0\n elif key == arcade.key.SPACE:\n self.player_sprite.speed = 0\n elif key == arcade.key.ESCAPE:\n if self.gameover:\n self.gameover = 0\n self.instruction_screen()", "def on_key_release(self, key, modifiers):\n if key == arcade.key.LEFT:\n self.player_sprite.stop_left()\n elif key == arcade.key.RIGHT:\n self.player_sprite.stop_right()", "def on_key_release(self, key, modifiers):\n if key == arcade.key.UP or key == arcade.key.DOWN:\n self.player.change_y = 0\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player.change_x = 0", "def on_keyboard_closed(self):\n self.keyboard.unbind(on_key_down=self.on_key_down)\n self.keyboard.unbind(on_key_up=self.on_key_up)\n self.keyboard = None", "def menu_key_control(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.menu_running = False\n self.running = False\n self.closing_menu = False\n return False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_1:\n self.number_limit_of_tie_fighters = 2\n self.menu_running = False\n if event.key == pygame.K_2:\n self.number_limit_of_tie_fighters = 3\n self.menu_running = False\n if event.key == pygame.K_3:\n self.menu_running = True\n self.running = True\n self.restart = True\n return False\n if event.key == pygame.K_4:\n self.closing_menu = False\n return False\n return True", "def key_event(self, key: Any, action: Any):\n pass", "def _onkeyrelease(self, fun, key):\n if fun is None:\n self.cv.unbind(\"<KeyRelease-%s>\" % key, None)\n else:\n def eventfun(event):\n fun()\n self.cv.bind(\"<KeyRelease-%s>\" % key, eventfun)", "def on_release(self, keyname):\n self.keydown = False\n keyname = str(keyname).strip('\\'')\n log.info('KEY RELEASE ' + keyname)\n if keyname in self.controls_keyrelease:\n key_handler = self.controls_keyrelease[keyname]()", "def unpress(self):\n if self.unclick:\n self.clicked = False", "def keyReleaseEvent(self, event: QtGui.QKeyEvent) -> None:\n if event.key() in [Qt.Key_W, Qt.Key_S, Qt.Key_A, Qt.Key_D] and self.__enable_key:\n new_direction = self.__directions.index(event.text())\n # ignore opposite direction\n if (new_direction + 2) % 4 == self.__h_direction:\n return\n self.__h_direction = new_direction\n if event.isAutoRepeat():\n self.__change_speed(self.__acc_step)\n print(f'{event.text().capitalize()}:accelerate speed')\n else:\n self.__change_speed(self.__step)\n print(f'{event.text().capitalize()}:normal speed')", "def _check_keyup_event(self, event):\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right = False\n if event.key == pygame.K_LEFT:\n self.ship.moving_left = False", "def updateInactive(self):\n change = self.input.key_count > 0 and self.lastkeys == 0 #ADD MORE ATTRIBUTES\n if change:\n keyPressed = self.input.is_key_down('f') or \\\n self.input.is_key_down('F')\n if keyPressed:\n self.setState(STATE_NEWWAVE)\n self.welcomeScreen()\n self.lastkeys == self.input.key_count #ADD MORE ATTRIBUTES", "def on_key_release(self, key, modifiers):\n if key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player.change_x = 0\n elif key == arcade.key.UP or key == arcade.key.DOWN:\n self.player.change_y = 0", "def ev_keydown(self, event: KeyDown) -> None:", "def keyReleaseEvent(self, ev):\n self.currentKbKey = None\n\n if (ev.key() == self.panKey):\n # disable Pan/Zoom mode\n self.panning = False\n if self.__pointerLeftWidget:\n # we've left the widget - reset the cursor to the standard arrow\n self.setCursor(Qt.ArrowCursor)\n else:\n 
self.setCursor(self.defaultCursor)\n elif (ev.key() == self.selectAddKey):\n # disable selection add mode\n if self.__pointerLeftWidget:\n # we've left the widget - reset the cursor to the standard arrow\n self.setCursor(Qt.ArrowCursor)\n else:\n self.setCursor(self.defaultCursor)\n elif (ev.key() == self.zoomKey):\n # disable zoom mode\n self.__zooming = False\n else:\n self.keyRelease.emit(self, ev)", "def keyPressEvent(self, event):\n self.game_engine.input_manager.keyPressEvent(event)", "def __keystroke(self, event):\n if event.state - self.__previous_state == 4: # means that the Control key is pressed\n pass # do nothing if Control key is pressed\n else:\n self.__previous_state = event.state # remember the last keystroke state\n # Up, Down, Left, Right keystrokes\n if event.keycode in [68, 39, 102]: # scroll right, keys 'd' or 'Right'\n self.__scroll_x('scroll', 1, 'unit', event=event)\n elif event.keycode in [65, 37, 100]: # scroll left, keys 'a' or 'Left'\n self.__scroll_x('scroll', -1, 'unit', event=event)\n elif event.keycode in [87, 38, 104]: # scroll up, keys 'w' or 'Up'\n self.__scroll_y('scroll', -1, 'unit', event=event)\n elif event.keycode in [83, 40, 98]: # scroll down, keys 's' or 'Down'\n self.__scroll_y('scroll', 1, 'unit', event=event)", "def on_key(self, _window, key, _scancode, action, _mods):\n is_press = action == glfw.PRESS or action == glfw.REPEAT\n if is_press and (key == glfw.KEY_ESCAPE or key == glfw.KEY_Q):\n glfw.set_window_should_close(self.window, True)\n\n if action != glfw.REPEAT:\n self.key_handler(key, is_press)", "def key_up(key):\n\n vk = key\n # XXX exception if >= 256\n _key_up(vk)", "def XPLoseKeyboardFocus(inWidget):\n pass", "def _check_keyup_events(self, event):\r\n if event.key == pg.K_RIGHT:\r\n self.ship.moving_right = False\r\n elif event.key == pg.K_LEFT:\r\n self.ship.moving_left = False", "def _check_keyup_events(self, event):\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right = False\n elif event.key == pygame.K_LEFT:\n self.ship.moving_left = False", "def keyboard_menu_control(self, app):\n mx, my = pg.mouse.get_pos()\n click = False\n\n menu_view = self.get_view.menu_view\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit()\n sys.exit()\n\n if event.type == pg.MOUSEBUTTONDOWN:\n if event.button == 1:\n click = True\n\n if menu_view.menu_button.collidepoint((mx, my)):\n if click:\n app.menu_view_running = False\n\n if menu_view.quit_button.collidepoint((mx, my)):\n if click:\n pg.quit()\n sys.exit(0)", "def on_keydown(self, keys, game) -> None:\n pass" ]
[ "0.75300187", "0.72335577", "0.7176089", "0.71289796", "0.70359325", "0.6950214", "0.6945082", "0.69241786", "0.6912116", "0.6841378", "0.6836066", "0.6817194", "0.6815543", "0.6788287", "0.6781232", "0.6695377", "0.6695377", "0.6574853", "0.6572941", "0.6561654", "0.65368277", "0.6524818", "0.65216756", "0.6517289", "0.65142286", "0.65010214", "0.6497143", "0.6487595", "0.6486961", "0.6479472", "0.64783406", "0.6475913", "0.64614695", "0.6420613", "0.641577", "0.6405633", "0.6401619", "0.6399758", "0.6399758", "0.6394231", "0.63908076", "0.6377259", "0.6364483", "0.6362936", "0.6350783", "0.63210654", "0.6317266", "0.6317266", "0.6308317", "0.6306946", "0.6297494", "0.6297494", "0.62914205", "0.6283493", "0.62819105", "0.62571096", "0.6247068", "0.6243458", "0.624213", "0.6236737", "0.6234811", "0.62303287", "0.6222047", "0.6220513", "0.62186205", "0.62173265", "0.62045246", "0.6202609", "0.61945516", "0.6191538", "0.61844134", "0.61844134", "0.61778235", "0.6175136", "0.61720407", "0.616539", "0.6165224", "0.6164778", "0.61624134", "0.6158567", "0.61495835", "0.61453927", "0.6143432", "0.61427855", "0.61381114", "0.61369026", "0.6133694", "0.61289674", "0.6127541", "0.6125264", "0.61243784", "0.61202145", "0.6114999", "0.6107166", "0.6101045", "0.6085791", "0.6082761", "0.60738915", "0.606849", "0.60638475", "0.6058212" ]
0.0
-1
Get the current menu (if any) for display
def get_menu_for_display(self):
    game = self.game
    if not game.menu:
        return

    menu = game.menu
    if not menu.is_visible:
        return None

    return menu
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_current_menu(self) -> Optional[Menu]:\n return self.menu_pointer", "def menu(self):\n return self._menu", "def GetMenu(self):\n return self._menu", "def get_menu ( self, object ):\n return self.menu", "def get_menu(menu_name):\n\n pass", "def menu(self) -> CursesMenu | None: # type: ignore[override]\n return self._menu", "def get_menus():\n\n pass", "def current_item(self) -> MenuItem | None:\n if not self.all_items:\n return None\n else:\n return self.all_items[self.current_option]", "def get_to_main_menu(self):\n return self.__toMainMenu", "def getMenu(self, name):\n if self.__object is not None:\n return self.__object.getMenu(name)\n else:\n return None", "def _get_menu(menu_name=None):\n if menu_name is None:\n menu_name = pipeline._menu\n\n widgets = dict((\n w.objectName(), w) for w in QtWidgets.QApplication.allWidgets())\n menu = widgets.get(menu_name)\n return menu", "def getMenuOption():\n return menu_option", "def activeMenuReference(self):\n return self.root_window.ui.menus.global_window_menus.debug", "def get_maya_menu():\n menuBar = [m for m in get_maya_window().children() if type(m) == QtWidgets.QMenuBar] or [None]\n return menuBar[0]", "def get_menu ( self, object, row ):\n return self.menu", "def get_app_menu(self): # real signature unknown; restored from __doc__\n pass", "def main_menu(self):\n return self.sitemap", "def GetMenu(self, name):\n # type: (str) -> Optional[QtWidgets.QMenu]\n return self._menus.get(name)", "def menus(self):\r\n return []", "def get_menu() -> str:\n date = datetime.date.today()\n urls = generate_urls(date)\n menu_json = fetch_menu(urls)\n menu = extract_menu(menu_json, date)\n\n return menu", "def get_menu_items():\n\n pass", "def menu(self):\n try:\n return get_template('{}/menu.html'.format(self.label))\n except TemplateDoesNotExist:\n return Template('')", "def get_one_menu_option():", "def main_menu(self):\n menu_string = \"Main menu\\n\"\n menu_string += \"\\t1. Modify a list\\n\"\n menu_string += \"\\t2. Grade submenu\\n\"\n menu_string += \"\\t3. Search for something\\n\"\n menu_string += \"\\t4. Get a statistic\\n\"\n menu_string += \"\\t5. Undo/Redo\\n\"\n menu_string += \"\\t0. 
Exit\\n\"\n stop = False\n\n while not stop:\n command_list = \\\n {'0': self.__no_command,\n '1': self.__modify_submenu,\n '2': self.__grade_submenu,\n '3': self.__search_submenu,\n '4': self.__statistics_submenu,\n '5': self.__undo_submenu\n }\n command = self.__ui_read_command(menu_string)\n\n if command in command_list.keys():\n if command == '0':\n return\n else:\n command_list[command]()\n\n else:\n print(\"Invalid command!\")", "def GetCurrentContext(self):\n # type: () -> MenuContext\n if self._contextCallback:\n return self._contextCallback()", "def menu_handler(self):\n return self._menu_handler", "async def top_menu(self) -> None:\n return await self.relay(\"top_menu\")()", "def get_all_menu():", "def getMenuItem(self, event):\n return self.GetMenuBar().FindItemById(event.GetId())", "def get_current(self):\n return self.current", "def get_menu_item(menu_item_name):\n\n pass", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values\n # You may change the menu if you'd like to add an experimental method\n menu = {\"n\": (\"Navigate forward\", self.nav),\n \"d\": (\"Dance\", self.dance),\n \"c\": (\"Calibrate\", self.calibrate),\n \"t\": (\"test restore\", self.calibrate),\n \"s\": (\"Check status\", self.status),\n \"q\": (\"Quit\", quit_now)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = raw_input(\"Your selection: \")\n # activate the item selected\n menu.get(ans, [None, error])[1]()", "def submenu(self) -> CursesMenu | None:\n return self._submenu", "def menu(self):\n from mainmenu import Menu\n gm = Menu(self.screen)\n gm.run()", "def present_menu (self, menu, groupName = 'main'):\n \n if not hasattr (cherrypy.request, 'nav'):\n cherrypy.request.nav = {}\n\n if not groupName in cherrypy.request.nav:\n cherrypy.request.nav [groupName] = []\n \n for item in menu.items:\n cherrypy.request.nav [groupName].append (item)", "def file_menu(self):\n return self.GetMenu(self.FindMenu(\"File\"))", "def create_menu():", "def main_menu ( self ):\n\t\tif self.style == 'qt':\n\t\t\tp = Process( target=self.qt_main_menu )\n\t\t\tp.start()\n\t\t\tself.menus.append( p )", "def navigate_mainMenu():\r\n msg, flag = \"\", False\r\n try: \r\n 'Click on the main menu item in OMM home page'\r\n \r\n flag = ui_controls.button(get_obj_identifier('mnu_btn'))\r\n if flag:\r\n print \"Main menu icon in home page is clicked\"\r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n return flag, msg", "def current():\n\n return {\n 'page': 'current',\n }", "def createMenu():\n mType = -1\n if auth.is_logged_in() and auth.has_membership('administrador',auth.user.id):\n return menuAdmin\n elif auth.is_logged_in():\n return menuUser\n else:\n return menuPublic", "def in_game_menu ( self ):\n\t\tif self.style == 'qt':\n\t\t\tp = Process( target=self.qt_in_game_menu )\n\t\t\tp.start()\n\t\t\tself.menus.append( p )", "def __previous_menu(self, *args):\n assert len(self.__previous_menus) >= 2\n\n # First, we're going to have to pop off the current menu.\n self.__previous_menus.pop()\n # Now, get the previous menu.\n menu_name = self.__previous_menus.pop()\n\n # Indicate that we should go to it.\n return menu_name", "def menu():\n return render_template('menu.html')", "def get_header_menu_text(self, menu):\n if menu == BasePage.HOME:\n home = self.browser.find_element(*locators.HOME_LINK).text\n return home\n elif menu == BasePage.SERVICE:\n services = 
self.browser.find_element(*locators.SERVICE_LINK).text\n return services\n elif menu == BasePage.CONTACT_FORM:\n contact_form = self.browser.find_element(*locators.CONTACT_FORM_LINK).text\n return contact_form\n elif menu == BasePage.METALS_COLORS:\n metals_colors = self.browser.find_element(*locators.METALS_COLORS_LINK).text\n return metals_colors", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values. Python is cool.\n # Please feel free to change the menu and add options.\n print(\"\\n *** MENU ***\") \n menu = {\"n\": (\"Navigate\", self.nav),\n \"d\": (\"Dance\", self.dance),\n \"o\": (\"Obstacle count\", self.obstacle_count),\n \"s\": (\"Shy\", self.shy),\n \"f\": (\"Follow\", self.follow),\n \"c\": (\"Calibrate\", self.calibrate),\n \"q\": (\"Quit\", self.quit)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = str.lower(input(\"Your selection: \"))\n # activate the item selected\n menu.get(ans, [None, self.quit])[1]()", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values. Python is cool.\n # Please feel free to change the menu and add options.\n print(\"\\n *** MENU ***\") \n menu = {\"n\": (\"Navigate\", self.nav),\n \"d\": (\"Dance\", self.dance),\n \"o\": (\"Obstacle count\", self.obstacle_count),\n \"s\": (\"Shy\", self.shy),\n \"f\": (\"Follow\", self.follow),\n \"c\": (\"Calibrate\", self.calibrate),\n \"q\": (\"Quit\", self.quit)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = str.lower(input(\"Your selection: \"))\n # activate the item selected\n menu.get(ans, [None, self.quit])[1]()", "def current_gui(self):\n return self._current_gui", "def menu(self):\n print(f\"{str(self)}\")", "def display_menu(self):\n return ', '.join(menu.name for menu in self.menu.all()[:3])", "def get_menu(cls, menu_type=None):\n # If no specific menu is requested, get the default.\n if menu_type is None:\n menu_type = cls.menu_in_use\n # Attempt to access a cached menu by its type.\n try:\n menu = cls.menus[cls.menu_in_use]\n # If there is no cached menu for that type...\n except KeyError:\n # Create and cache a new instance of the desired menu type.\n menu = cls.menus[menu_type] = cls._build(menu_type)\n if menu is None:\n raise KeyError(f\"No such menu type {menu_type}\")\n return menu", "def back_to_menu_info(cls):\n print(\n \"\"\"\n ________________________________________________\n\n HABITSBOX\n ________________________________________________\n Hint: Press 0 (zero) to return to the main menu\n ------------------------------------------------\"\"\")", "def selected_item(self) -> MenuItem | None:\n if self.selected_option == -1:\n return None\n else:\n return self.all_items[self.selected_option]", "def GetMenuContext(self):\n # type: () -> MenuContext\n if self._contextProvider is not None:\n return self._contextProvider.GetMenuContext()\n raise NotImplementedError('No context provider set and GetMenuContext '\n 'not reimplemented')", "def get_current_item(self, *args):\n return _ida_hexrays.vdui_t_get_current_item(self, *args)", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values. 
Python is cool.\n # Please feel free to change the menu and add options.\n print(\"\\n *** MENU ***\") \n menu = {\"c\": (\"Calibrate\", self.calibrate),\n \"d\": (\"Dance\", self.dance),\n \"h\": (\"Hold position\", self.hold_position),\n \"n\": (\"Navigate\", self.nav),\n \"o\": (\"Obstacle count\", self.obstacle_count),\n \"q\": (\"Quit\", self.quit),\n \"v\": (\"Veer\", self.slither)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = str.lower(input(\"Your selection: \"))\n # activate the item selected\n menu.get(ans, [None, self.quit])[1]()", "def get_menu_items(self) -> typing.List[typing.Tuple[str, typing.List[typing.Tuple[str, typing.Callable[[], None]]]]]: #this method is to be queried by the root frame when it is creating the menu bar at the top of the screen and needs options to put in it\n return []", "def _MenuAboutToShow(self, menuName):\n menu = self._menus[menuName]\n context = self._contextProvider.GetMenuContext()\n for action in menu.actions():\n if action.isSeparator():\n continue\n actionData = action.data()\n if actionData and isinstance(actionData, MenuAction):\n actionData.Update(action, context)", "def show_menu():\n if not GD.gui.menu.item('Tools'):\n create_menu()", "def _get_template_menu_lst(self):\n if self.menulst is None:\n template_menulst = os.path.join(self.options.sourcedir, \"hake\",\n self.menulst_template)\n with open(template_menulst) as f:\n self.menulst = f.readlines()\n\n return self.menulst", "def menu_screen(win):\n\tpass", "def menu(self):\n menu = list()\n \n \n menu.extend([\n {\n 'title': 'Bootstrap Demo',\n 'href': self.request.route_url('bootstrap_demo'),\n 'icon': \"fa fa-twitter-square\"\n },\n {\n 'title': 'Jade Demo',\n 'href': self.request.route_url('jade_demo'),\n 'icon': \"fa fa-indent\"\n },\n ])\n if self.user:\n menu.extend([\n {\n 'title': 'Entities',\n 'icon': \"fa fa-bar-chart\",\n 'dropdown': [\n {\n 'title': 'All entities',\n 'href': self.request.route_url(\n 'entities',\n ext='html',\n _query={\n 'renderer': 'datatable',\n 'options': 'serverside-columnsearch'\n }\n ),\n 'icon': \"fa fa-bar-chart\"},\n {\n 'title': 'CPTs',\n 'href': self.request.route_url(\n 'cpts',\n ext='html',\n _query={\n 'renderer': 'datatable',\n 'options': 'columnsearch'\n }\n ),\n }\n ]\n }\n ]),\n if self.user.has_admin:\n menu.append(\n {\n 'title': \"User Management\",\n 'icon': \"fa fa-users\",\n 'dropdown': [\n {\n 'title': 'User Overview',\n 'href': self.request.route_url(\n 'users',\n ext='html',\n _query={\n 'renderer': 'datatable',\n 'options': 'serverside-columnsearch'\n }\n ),\n 'icon': 'fa fa-users',\n },\n {\n 'title': 'Add User',\n 'href': self.request.route_url('user_create'),\n 'icon': 'fa fa-user-plus',\n }\n ]\n }\n )\n\n return menu", "def create_menus( self ):", "def print_menu(self):\n for i,x in enumerate(self.menu):\n print(\"%i. 
%s\"%(i+1,x))\n return self.get_int()", "def Current (cls):\n if cls.__ContextStack:\n return cls.__ContextStack[-1]\n return None", "def main_menu(self) -> str:\n print(\" ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n print(\" MENU PRINCIPALE \")\n print(\n \"\\n\"\n \" ● 1 - Créer un tournoi ●\\n\"\n \" ● 2 - Ajouter des joueurs à un tournoi ●\\n\"\n \" ● 3 - Débuté ou continué un tournoi ●\\n\"\n \" ● 4 - Ajouter un nouveau joueur ●\\n\"\n \" ● 5 - Modifier classement d'un joueur ●\\n\"\n \" ● 6 - Menu Secondaire ●\\n\"\n \" ● 7 - Quitter l'application ● \"\n\n )\n print(\" ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n response = input(\"Choississez un chiffre pour naviguer dans le menu : \")\n return response", "def selected_title(self):\r\n try:\r\n return menu_selected[self.name]\r\n except KeyError:\r\n return NavButton.selected_title(self)", "def current_context():\n return _current.get()", "def get_menu_items(self):\n url = self.build_url(\"menus/\")\n res = get(url)\n if res.ok:\n return [x[\"item_name\"] for x in res.json()]\n return None", "def current_window(self):\n pass", "def navigate_mainMenu_settings():\r\n msg, flag = \"\", False\r\n try:\r\n 'click on home main menu button'\r\n flag1 = navigate_mainMenu()\r\n\r\n 'Click on the settings item in the list generated from OMM home page -> main menu'\r\n flag2 = ui_controls.button(get_obj_identifier('home_mainMenu_settings_lnk'))\r\n flag = flag1 and flag2\r\n\r\n if flag:\r\n print \"settings in the home page -> main menu button is clicked\"\r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n return flag, msg", "def GetCurrentItem(self):\r\n\r\n return self._current", "def get_menubar_path():\n return __get_environ_path('FR_MYMENUBAR')", "def goto_menu(self, *args):\n self.manager.current = 'Main Menu'\n self.reset()\n self.manager.reset()", "def Current(self) -> str:", "def current(cls):\n return stackless.getcurrent()", "def menu():\n menu = 'main'\n while 1:\n if menu == 'main':\n click.echo('Main menu:')\n click.echo(' d: debug menu')\n click.echo(' q: quit')\n char = click.getchar()\n if char == 'd':\n menu = 'debug'\n elif char == 'q':\n menu = 'quit'\n else:\n click.echo('Invalid input')\n elif menu == 'debug':\n click.echo('Debug menu')\n click.echo(' b: back')\n char = click.getchar()\n if char == 'b':\n menu = 'main'\n else:\n click.echo('Invalid input')\n elif menu == 'quit':\n return", "def findMenuInMenuBar(menuBar, title):\n pos = menuBar.FindMenu(title)\n if pos == wx.NOT_FOUND:\n return None\n return menuBar.GetMenu(pos)", "def get_contextual(old_windows, is_fatal=True):\n\n c = [w for w in Gtk.Window.list_toplevels() if w not in\n old_windows and w.get_mapped()]\n if not c:\n if is_fatal:\n gps_fatal_error('No contextual menu created')\n return None\n return c[0]", "def print_name(self):\n logging.info(\"Active menu is {0}\".format(self.name))", "def accessoriesMenu():\n pref = QtGui.QAction(mw)\n pref.setText(\"Command panel\")\n pref.setObjectName(\"CommandPanel\")\n pref.triggered.connect(onPreferences)\n try:\n import AccessoriesMenu\n AccessoriesMenu.addItem(\"CommandPanel\")\n except ImportError:\n a = mw.findChild(QtGui.QAction, \"AccessoriesMenu\")\n if a:\n a.menu().addAction(pref)\n else:\n mb = mw.menuBar()\n action = QtGui.QAction(mw)\n action.setObjectName(\"AccessoriesMenu\")\n action.setIconText(\"Accessories\")\n menu = QtGui.QMenu()\n action.setMenu(menu)\n menu.addAction(pref)\n\n def addMenu():\n \"\"\"Add 
accessories menu to the menu bar.\"\"\"\n toolsMenu = mb.findChild(QtGui.QMenu, \"&Tools\")\n if toolsMenu:\n toolsMenu.addAction(action)\n\n addMenu()\n mw.workbenchActivated.connect(addMenu)", "def getCurrent(self):\n return self.__current", "def initMenu(self, menu):\n menu.clear()\n \n self.subMenus = []\n \n adminMenu = QMenu(self.tr(\"Administration\"), menu)\n adminMenu.setTearOffEnabled(True)\n adminMenu.addAction(self.gitShowConfigAct)\n adminMenu.addAction(self.gitRepoConfigAct)\n adminMenu.addSeparator()\n adminMenu.addAction(self.gitReflogBrowserAct)\n adminMenu.addSeparator()\n adminMenu.addAction(self.gitCreateIgnoreAct)\n adminMenu.addSeparator()\n adminMenu.addAction(self.gitCreateArchiveAct)\n adminMenu.addSeparator()\n adminMenu.addAction(self.gitStatisticsAct)\n adminMenu.addAction(self.gitVerifyAct)\n adminMenu.addAction(self.gitHouseKeepingAct)\n self.subMenus.append(adminMenu)\n \n bundleMenu = QMenu(self.tr(\"Bundle Management\"), menu)\n bundleMenu.setTearOffEnabled(True)\n bundleMenu.addAction(self.gitBundleAct)\n bundleMenu.addSeparator()\n bundleMenu.addAction(self.gitBundleVerifyAct)\n bundleMenu.addAction(self.gitBundleListHeadsAct)\n bundleMenu.addSeparator()\n bundleMenu.addAction(self.gitBundleApplyFetchAct)\n bundleMenu.addAction(self.gitBundleApplyPullAct)\n self.subMenus.append(bundleMenu)\n \n patchMenu = QMenu(self.tr(\"Patch Management\"), menu)\n patchMenu.setTearOffEnabled(True)\n patchMenu.addAction(self.gitCheckPatchesAct)\n patchMenu.addAction(self.gitApplyPatchesAct)\n patchMenu.addSeparator()\n patchMenu.addAction(self.gitShowPatcheStatisticsAct)\n self.subMenus.append(patchMenu)\n \n bisectMenu = QMenu(self.tr(\"Bisect\"), menu)\n bisectMenu.setTearOffEnabled(True)\n bisectMenu.addAction(self.gitBisectStartAct)\n bisectMenu.addAction(self.gitBisectStartExtendedAct)\n bisectMenu.addSeparator()\n bisectMenu.addAction(self.gitBisectGoodAct)\n bisectMenu.addAction(self.gitBisectBadAct)\n bisectMenu.addAction(self.gitBisectSkipAct)\n bisectMenu.addSeparator()\n bisectMenu.addAction(self.gitBisectResetAct)\n bisectMenu.addSeparator()\n bisectMenu.addAction(self.gitBisectLogBrowserAct)\n bisectMenu.addSeparator()\n bisectMenu.addAction(self.gitBisectCreateReplayAct)\n bisectMenu.addAction(self.gitBisectEditReplayAct)\n bisectMenu.addAction(self.gitBisectReplayAct)\n self.subMenus.append(bisectMenu)\n \n tagsMenu = QMenu(self.tr(\"Tags\"), menu)\n tagsMenu.setIcon(UI.PixmapCache.getIcon(\"vcsTag.png\"))\n tagsMenu.setTearOffEnabled(True)\n tagsMenu.addAction(self.vcsTagAct)\n tagsMenu.addAction(self.gitTagListAct)\n tagsMenu.addAction(self.gitDescribeTagAct)\n self.subMenus.append(tagsMenu)\n \n branchesMenu = QMenu(self.tr(\"Branches\"), menu)\n branchesMenu.setIcon(UI.PixmapCache.getIcon(\"vcsBranch.png\"))\n branchesMenu.setTearOffEnabled(True)\n branchesMenu.addAction(self.gitBranchAct)\n branchesMenu.addSeparator()\n branchesMenu.addAction(self.gitBranchListAct)\n branchesMenu.addAction(self.gitMergedBranchListAct)\n branchesMenu.addAction(self.gitNotMergedBranchListAct)\n branchesMenu.addAction(self.gitShowBranchAct)\n branchesMenu.addSeparator()\n branchesMenu.addAction(self.gitDeleteRemoteBranchAct)\n self.subMenus.append(branchesMenu)\n \n changesMenu = QMenu(self.tr(\"Manage Changes\"), menu)\n changesMenu.setTearOffEnabled(True)\n changesMenu.addAction(self.gitUnstageAct)\n changesMenu.addAction(self.vcsRevertAct)\n changesMenu.addAction(self.vcsMergeAct)\n changesMenu.addAction(self.gitCommitMergeAct)\n 
changesMenu.addAction(self.gitCancelMergeAct)\n \n remotesMenu = QMenu(self.tr(\"Remote Repositories\"), menu)\n remotesMenu.setTearOffEnabled(True)\n remotesMenu.addAction(self.gitRemotesShowAct)\n remotesMenu.addAction(self.gitRemoteShowAct)\n remotesMenu.addSeparator()\n remotesMenu.addAction(self.gitRemoteAddAct)\n remotesMenu.addAction(self.gitRemoteRenameAct)\n remotesMenu.addAction(self.gitRemoteChangeUrlAct)\n remotesMenu.addAction(self.gitRemoteCredentialsAct)\n remotesMenu.addAction(self.gitRemoteRemoveAct)\n remotesMenu.addAction(self.gitRemotePruneAct)\n \n cherrypickMenu = QMenu(self.tr(\"Cherry-pick\"), menu)\n cherrypickMenu.setIcon(UI.PixmapCache.getIcon(\"vcsGraft.png\"))\n cherrypickMenu.setTearOffEnabled(True)\n cherrypickMenu.addAction(self.gitCherryPickAct)\n cherrypickMenu.addAction(self.gitCherryPickContinueAct)\n cherrypickMenu.addAction(self.gitCherryPickQuitAct)\n cherrypickMenu.addAction(self.gitCherryPickAbortAct)\n \n stashMenu = QMenu(self.tr(\"Stash\"), menu)\n stashMenu.setTearOffEnabled(True)\n stashMenu.addAction(self.gitStashAct)\n stashMenu.addSeparator()\n stashMenu.addAction(self.gitStashBrowserAct)\n stashMenu.addAction(self.gitStashShowAct)\n stashMenu.addSeparator()\n stashMenu.addAction(self.gitStashApplyAct)\n stashMenu.addAction(self.gitStashPopAct)\n stashMenu.addSeparator()\n stashMenu.addAction(self.gitStashBranchAct)\n stashMenu.addSeparator()\n stashMenu.addAction(self.gitStashDropAct)\n stashMenu.addAction(self.gitStashClearAct)\n \n submodulesMenu = QMenu(self.tr(\"Submodules\"), menu)\n submodulesMenu.setTearOffEnabled(True)\n submodulesMenu.addAction(self.gitSubmoduleAddAct)\n submodulesMenu.addSeparator()\n submodulesMenu.addAction(self.gitSubmodulesInitAct)\n submodulesMenu.addAction(self.gitSubmodulesUpdateInitAct)\n submodulesMenu.addAction(self.gitSubmodulesDeinitAct)\n submodulesMenu.addSeparator()\n submodulesMenu.addAction(self.gitSubmodulesUpdateAct)\n submodulesMenu.addAction(self.gitSubmodulesUpdateRemoteAct)\n submodulesMenu.addAction(self.gitSubmodulesUpdateOptionsAct)\n submodulesMenu.addSeparator()\n submodulesMenu.addAction(self.gitSubmodulesSyncAct)\n submodulesMenu.addSeparator()\n submodulesMenu.addAction(self.gitSubmodulesListAct)\n submodulesMenu.addSeparator()\n submodulesMenu.addAction(self.gitSubmodulesStatusAct)\n submodulesMenu.addAction(self.gitSubmodulesSummaryAct)\n \n act = menu.addAction(\n UI.PixmapCache.getIcon(\n os.path.join(\"VcsPlugins\", \"vcsGit\", \"icons\", \"git.png\")),\n self.vcs.vcsName(), self._vcsInfoDisplay)\n font = act.font()\n font.setBold(True)\n act.setFont(font)\n menu.addSeparator()\n \n menu.addAction(self.gitFetchAct)\n menu.addAction(self.gitPullAct)\n menu.addSeparator()\n menu.addAction(self.vcsCommitAct)\n menu.addAction(self.gitPushAct)\n menu.addSeparator()\n menu.addMenu(changesMenu)\n menu.addMenu(stashMenu)\n menu.addSeparator()\n menu.addMenu(cherrypickMenu)\n menu.addSeparator()\n menu.addMenu(bundleMenu)\n menu.addMenu(patchMenu)\n menu.addSeparator()\n menu.addMenu(remotesMenu)\n menu.addMenu(submodulesMenu)\n menu.addSeparator()\n menu.addMenu(tagsMenu)\n menu.addMenu(branchesMenu)\n menu.addSeparator()\n menu.addAction(self.gitLogBrowserAct)\n menu.addSeparator()\n menu.addAction(self.vcsStatusAct)\n menu.addSeparator()\n menu.addAction(self.vcsDiffAct)\n menu.addAction(self.gitExtDiffAct)\n menu.addSeparator()\n menu.addAction(self.vcsSwitchAct)\n menu.addSeparator()\n menu.addMenu(bisectMenu)\n menu.addSeparator()\n menu.addAction(self.vcsCleanupAct)\n 
menu.addSeparator()\n menu.addAction(self.vcsCommandAct)\n menu.addSeparator()\n menu.addMenu(adminMenu)\n menu.addSeparator()\n menu.addAction(self.gitEditUserConfigAct)\n menu.addAction(self.gitConfigAct)\n menu.addSeparator()\n menu.addAction(self.vcsNewAct)\n menu.addAction(self.vcsExportAct)", "def _syncDisplayMenu(ned, menu):\n pass", "def initMenu(self):\n self.fileMenu = self.menuBar().addMenu(self.tr(\"&File\"))\n self.fileMenu.addAction(self.createProjectAction)\n self.fileMenu.addAction(self.openProjectAction)\n\n #TODO : problem displaying submenu\n #self.recentMenu = self.fileMenu.addMenu(self.tr(\"Open &recent\"))\n #for recentProject in self._controller.getSession().recentProjects():\n #recentAction = QtGui.QAction(self.tr(str(recentProject.getPath())), self)\n #self.connect(recentAction, QtCore.SIGNAL(\"triggered()\"), self, QtCore.SLOT(\"openRecent(recentProject.getPath())\"))\n #self.recentMenu.addAction(recentAction)\n\n self.fileMenu.addSeparator()\n self.fileMenu.addAction(self.importVideoAction)\n self.fileMenu.addSeparator()\n self.fileMenu.addAction(self.saveProjectAction)\n\n self.helpMenu = self.menuBar().addMenu(self.tr(\"&Help\"))\n self.helpMenu.addAction(self.aboutAction)", "def checkMenuItem(self):\r\n self.eventID, self.parameter, res = self.receiver.getMenuItem()\r\n\r\n return res", "def getMenuNames(self):\n if self.__object is not None:\n return list(self.__menus.keys())\n else:\n return []", "def menu():\n logout_user()\n return render_template('menu.html')", "def get_navbar_active(request):\n parts = request.path.split('/')\n return parts[1] or 'home'", "def current_control(self):\n return self.layout.current_control", "def __admin_menu(self):\n log.debug(\"Displaying __admin_menu\")\n self.menu = TelegramMenu(\"config/comunda_admin_menu.bpmn\", self, \"MenuStart\")\n self.menu.admin_menu(\"MenuStart\", \"menu_admin_main_txt\")\n return", "def get_str_menu1(self, paused = False):\n self.last = \"get_str_menu1\"\n self.check_task()\n #Siempre que pase por aquei, actualizo tareas\n print \"Menu principal\"\n delimiter = \"\\n********************\\n\"\n user_delimiter = \"User: \" + self.user_name\n keys =''\n menu_str =''\n self.num_order_list_ops = 1\n if self.tasks:\n if len(self.tasks)>0:\n menu_str += u\"0 -> Tareas Asignadas\\n\"\n if self.tasks_paused():\n #menu_str += u\"1 -> Tarea de ubicacion\\n2 -> Tarea de reposicion\\n3 -> Tarea de picking\\n\"\n menu_str += u\"1 -> Tarea de ubicacion\\n3 -> Tarea de picking\\n\"\n menu_str+= u\"4 -> Movimiento Manual\\n\"\n menu_str+= u\"8 -> Info Etiqueta\\n\"\n menu_str+= u\"9 -> Herramientas\\n\"\n\n if self.show_keys:\n keys = u\"%s Atras\"%KEY_VOLVER\n menu_str += delimiter + keys\n return menu_str", "def main_menu_toolbar():\n\n pass", "def _createDisplayMenu(ned, menu):\n pass", "def topLevelMenu(c):\n global thePluginController \n thePluginController.showManagerDialog(c)", "def _status_menu(self):\n title = 'Now Playing: %s' % self._player.get_media_name()\n stats = (\n ('Playback rate', '%1.1fx' % self._player.get_playback_rate()),\n ('Elapsed', _format_time(self._player.get_position())),\n ('Total Length', _format_time(self._player.get_media_length())),\n )\n return '\\n'.join((build_menu(title, action_rows=stats),\n build_menu('Commands', action_rows=self._command_help)))", "def menu_inicial():\n clear_window()\n items = [\"Juego Nuevo\", \"Acerca de\", \"Salir\"]\n while True:\n show_title(\"____ Menu Inicial ____\")\n item = show_menu(items)\n clear_window()\n if item == 0 :\n juego_nuevo()\n 
clear_window()\n elif item==1 :\n mostrar_acerca_de()\n clear_window()\n elif item==2 :\n return\n else:\n print \"Opcion invalida\"", "def __help_menu(self):\n log.debug(\"Displaying __help_menu\")\n # Create a keyboard with the user help menu\n keyboard = [[telegram.KeyboardButton(self.loc.get(\"menu_guide\"))],\n [telegram.KeyboardButton(self.loc.get(\"menu_contact_shopkeeper\"))],\n [telegram.KeyboardButton(self.loc.get(\"menu_all_cancel\"))]]\n # Send the previously created keyboard to the user (ensuring it can be clicked only 1 time)\n self.bot.send_message(self.chat.id,\n self.loc.get(\"conversation_open_help_menu\"),\n reply_markup=telegram.ReplyKeyboardMarkup(keyboard, one_time_keyboard=True))\n # Wait for a reply from the user\n selection = self.__wait_for_specific_message([\n self.loc.get(\"menu_guide\"),\n self.loc.get(\"menu_contact_shopkeeper\")\n ], cancellable=True)\n # If the user has selected the Guide option...\n if selection == self.loc.get(\"menu_guide\"):\n # Send them the bot guide\n self.bot.send_message(self.chat.id, self.loc.get(\"help_msg\"))\n # If the user has selected the Order Status option...\n elif selection == self.loc.get(\"menu_contact_shopkeeper\"):\n # Find the list of available shopkeepers\n shopkeepers = self.session.query(db.Admin).filter_by(display_on_help=True).join(db.User).all()\n # Create the string\n shopkeepers_string = \"\\n\".join([admin.user.mention() for admin in shopkeepers])\n # Send the message to the user\n self.bot.send_message(self.chat.id, self.loc.get(\"contact_shopkeeper\", shopkeepers=shopkeepers_string))\n # If the user has selected the Cancel option the function will return immediately", "def get_current(self, event=None):\n childes = self.nb.winfo_children() # return the list objects of child widgets of notebook[tab widget]\n return childes[self.nb.index('current')].winfo_children()[0]", "def get_change_menu(self, menu_name: str):\n if menu_name not in self.menu_dict:\n raise Menu_Manager.NoMenu\n else:\n def return_func():\n self.change_menu(menu_name)\n return return_func" ]
[ "0.8369181", "0.808696", "0.7934751", "0.7843155", "0.7731306", "0.7376547", "0.7319818", "0.73052615", "0.7292033", "0.7227728", "0.7197315", "0.71656644", "0.7100037", "0.7065015", "0.6940004", "0.6820579", "0.680545", "0.6757568", "0.6753027", "0.67357635", "0.67194587", "0.66236395", "0.6617727", "0.6605111", "0.660413", "0.6596653", "0.659616", "0.65486246", "0.644963", "0.64173245", "0.63738465", "0.6326858", "0.63051176", "0.62766343", "0.62598115", "0.6250514", "0.6218", "0.6166773", "0.6153793", "0.6146973", "0.6144861", "0.61366445", "0.61314297", "0.6129522", "0.61099505", "0.6084861", "0.6084861", "0.6080269", "0.6068757", "0.6063267", "0.60282797", "0.6027098", "0.6005089", "0.5992886", "0.5990832", "0.59601825", "0.59502685", "0.5947657", "0.59465903", "0.5939838", "0.59333163", "0.58879304", "0.58726305", "0.586924", "0.5867477", "0.58609056", "0.5859254", "0.5855989", "0.5852927", "0.5850596", "0.58413535", "0.5841152", "0.5837869", "0.58377606", "0.58285445", "0.58178854", "0.581665", "0.58065975", "0.5803274", "0.5791751", "0.5776559", "0.5770785", "0.57653123", "0.57612616", "0.5760862", "0.5748925", "0.57435405", "0.5739778", "0.57386774", "0.5730904", "0.57305735", "0.5714515", "0.57124335", "0.5680613", "0.5678774", "0.5674844", "0.5667119", "0.56655306", "0.565939", "0.565368" ]
0.77827156
4
If the menu should be visible, then draw it
def draw_menu(self):
    menu = self.get_menu_for_display()
    if not menu:
        return

    menu_center_x, menu_center_y, menu_cords = self.get_menu_coords(menu)
    arcade.draw_rectangle_filled(
        menu_center_x,
        menu_center_y,
        menu.width,
        menu.height,
        COLOUR_MAP[menu.base_colour]
    )

    text_height = menu_cords[0][1] - (menu.button_padding * 3)
    for text in menu.text_lines:
        arcade.draw_text(
            text,
            menu_center_x,
            text_height,
            arcade.color.BLACK,
            12,
            align="center",
            anchor_x="center",
            anchor_y="top",
        )
        text_height = text_height - (menu.button_padding * 3)

    for button_index, button in enumerate(menu.button_list):
        self.draw_button(
            button,
            menu_cords[0][0],
            menu_cords[0][1],
            menu.width,
            menu.height,
            menu.selected_index == button_index
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw(self):\n self.menu_pointer.draw()", "def draw_menu(self):\n self.screen.fill(self.menu_color, self.rect)\n pygame.draw.rect(self.screen, self.border_color, self.rect, 5)\n self.screen.blit(self.title_image, self.title_image_rect)\n\n self.play_button.draw_button()", "def draw_main_menu():\n draw_cover()\n draw_menu_buttons()\n draw_border()", "def draw_menu(self, menu):\n for but in menu.get_buttons():\n self.win.blit(but.image, but.rect)", "def draw(self, DISPLAYSURF):\n\n pygame.draw.rect(DISPLAYSURF, GREY, self.menuRect)\n\n for item in self._menu_items:\n if item['menu_type'] == 'Button':\n pygame.draw.rect(DISPLAYSURF, item['colour'], item['rect'])\n DISPLAYSURF.blit(item['text'], (item['rect'].left+7, item['rect'].top+4))\n # highlights\n if item['highlight'] == ACTIVE and item['persistence'] == True:\n if item['colour'] != WHITE:\n pygame.draw.rect(DISPLAYSURF, BLACK, item['rect'], 3)\n else:\n pygame.draw.rect(DISPLAYSURF, BLUE, item['rect'], 3)\n elif item['menu_type'] == 'Slider':\n pygame.draw.rect(DISPLAYSURF, item['colour'], item['rect'])\n DISPLAYSURF.blit(item['text'][0], (item['xpos']+10, item['ypos']-40))\n DISPLAYSURF.blit(item['text'][1], (item['xpos']-5, item['ypos']+15))\n DISPLAYSURF.blit(item['text'][2], (item['xpos']+item['width']-15, item['ypos']+15))\n pygame.draw.line(DISPLAYSURF, item['colour'], (item['xpos'], item['ypos']),\n (item['xpos']+item['width'], item['ypos']))\n for button in self._menu_items:\n if button['menu_type'] == 'Button':\n if button['persistence'] == False:\n if button['rect'].collidepoint(pygame.mouse.get_pos()[0],\n pygame.mouse.get_pos()[1]):\n pygame.draw.rect(DISPLAYSURF, BLUE, button['rect'], 3)", "def drawMenu(self):\r\n menuText, menuSize = self.menuView.draw() \r\n self.drawCenteredText(menuText, menuSize, .5, 11.0/16)", "def display_menu(self):\n menu_view = self.get_view.menu_view\n menu_view.blit_background()\n menu_view.draw_menu_view()\n menu_view.update_display()", "def show_menu():\n if not GD.gui.menu.item('Tools'):\n create_menu()", "def draw_menu(self):\n self.__screen.fill(pygame.Color(\"black\"))\n self.__screen.blit(Constants.Assets.MENU_BACKGROUND_IMG, (0, 0))\n self.__start_button.draw(self.__screen, Constants.WHITE)\n self.__end_button.draw(self.__screen, Constants.WHITE)\n self.__about_button.draw(self.__screen, Constants.WHITE)", "def draw_main_menu(self):\n if self.main_menu_surface is None:\n self.init_main_menu_surface()\n self.main_menu_surface.fill(black)\n self.draw_title()\n for button in self.enabled_buttons():\n self.main_menu_surface.blit(button.get_rendered_button(), button.get_position())\n self.screen.blit(self.main_menu_surface, (0, 0))", "def draw( self, screen, game_self):\r\n\r\n if self.is_visible == False: return\r\n \r\n window.Window.draw(self, screen)\r\n\r\n screen.blit( self.top_font, ((self.centerx-self.top_font.get_width()/2), 60))\r\n \r\n\r\n if self.menu == self.SWORD:\r\n pygame.draw.rect(screen, COLOR_GLAY, Rect(204, 95, 232, 30), 0)\r\n if self.menu == self.KATANA:\r\n pygame.draw.rect(screen, COLOR_GLAY, Rect(204, 125, 232, 30), 0)\r\n if self.menu == self.BLUNT:\r\n pygame.draw.rect(screen, COLOR_GLAY, Rect(204, 155, 232, 30), 0)\r\n if self.menu == self.GUN:\r\n pygame.draw.rect(screen, COLOR_GLAY, Rect(204, 185, 232, 30), 0)\r\n if self.menu == self.THROW: \r\n pygame.draw.rect(screen, COLOR_GLAY, Rect(204, 215, 232, 30), 0)\r\n if self.menu == self.SHIELD:\r\n pygame.draw.rect(screen, COLOR_GLAY, Rect(204, 245, 232, 30), 0)\r\n if self.menu == self.ARMOR:\r\n 
pygame.draw.rect(screen, COLOR_GLAY, Rect(204, 275, 232, 30), 0)\r\n if self.menu == self.HELMET:\r\n pygame.draw.rect(screen, COLOR_GLAY, Rect(204, 305, 232, 30), 0)\r\n if self.menu == self.GAUNTLET:\r\n pygame.draw.rect(screen, COLOR_GLAY, Rect(204, 335, 232, 30), 0)\r\n if self.menu == self.ACCESSORY:\r\n pygame.draw.rect(screen, COLOR_GLAY, Rect(204, 365, 232, 30), 0)\r\n if self.menu == self.ITEM:\r\n pygame.draw.rect(screen, COLOR_GLAY, Rect(204, 395, 232, 30), 0)\r\n\r\n\r\n screen.blit(self.sword_font, ((self.centerx-self.sword_font.get_width()/2), 100))\r\n screen.blit(self.katana_font, ((self.centerx-self.katana_font.get_width()/2), 130))\r\n screen.blit(self.blunt_font, ((self.centerx-self.blunt_font.get_width()/2), 160))\r\n screen.blit(self.gun_font, ((self.centerx-self.gun_font.get_width()/2), 190))\r\n screen.blit(self.throw_font, ((self.centerx-self.throw_font.get_width()/2), 220))\r\n screen.blit(self.shield_font, ((self.centerx-self.shield_font.get_width()/2), 250))\r\n screen.blit(self.armor_font, ((self.centerx-self.armor_font.get_width()/2), 280))\r\n screen.blit(self.helmet_font, ((self.centerx-self.helmet_font.get_width()/2), 310))\r\n screen.blit(self.gauntlet_font, ((self.centerx-self.gauntlet_font.get_width()/2), 340))\r\n screen.blit(self.accessory_font, ((self.centerx-self.accessory_font.get_width()/2), 370))\r\n screen.blit(self.item_font, ((self.centerx-self.item_font.get_width()/2), 400))\r\n\r\n\r\n #draw extra window\r\n self.buy_window.draw(screen, game_self)", "def draw(self):\n b = DrawBuffer()\n cNormal = self.getColor(0x0301)\n b.moveChar(0, ' ', cNormal, self.size.x)\n if self.menu and self.menu.items:\n x = 1\n items = (item for item in self.menu.items if item.name)\n for p in items:\n nameLen = nameLength(p.name)\n if x + nameLen < self.size.x:\n color = self._chooseColor(p)\n b.moveChar(x, ' ', color, 1)\n b.moveCStr(x + 1, p.name, color)\n b.moveChar(x + nameLen + 1, ' ', color, 1)\n x += (nameLen + 2)\n self.writeBuf(0, 0, self.size.x, 1, b)", "def _createDisplayMenu(ned, menu):\n pass", "def show_menus(self, type_):\n if type_ == self._current:\n # do nothing\n pass\n else:\n if self._current == self.TYPE_VOIGT:\n # Plot menus are visible; hide them.\n plot_menu_labels = [menu.label for menu in self._plot_menus]\n\n for menu in self.top_level_menus:\n if menu.label in plot_menu_labels:\n self.Remove(self.FindMenu(menu.label))\n elif self._current == self.TYPE_GISO:\n # Plot menus are visible; hide them.\n plot_menu_labels = [menu.label for menu in self._plot_menus]\n\n for menu in self.top_level_menus:\n if menu.label in plot_menu_labels:\n self.Remove(self.FindMenu(menu.label))\n\n # Rebuild the view menu by deleting everything from it and then \n # reappending the appropriate items.\n while self.view_menu.GetMenuItemCount():\n #self.view_menu.DeleteItem(self.view_menu.FindItemByPosition(0))\n self.view_menu.Delete(self.view_menu.FindItemByPosition(0))\n\n _append_items(self._main, self.view_menu, self._menu_data[type_])\n\n if type_ == self.TYPE_VOIGT:\n # add plot menus\n for menu in self._plot_menus[::-1]:\n self.Insert(_PLOT_MENU_INSERT_INDEX, menu, menu.label)\n # Under wxPython 2.9, the menus I add with this call to \n # Insert() don't have their label set. I think it's a bug,\n # but I can't recreate it outside of this app. 
Manually\n # setting the label here is a workaround.\n self.SetMenuLabel(_PLOT_MENU_INSERT_INDEX, menu.label)\n elif type_ == self.TYPE_GISO:\n # add plot menus\n for menu in self._plot_menus[::-1]:\n self.Insert(_PLOT_MENU_INSERT_INDEX, menu, menu.label)\n # Under wxPython 2.9, the menus I add with this call to \n # Insert() don't have their label set. I think it's a bug,\n # but I can't recreate it outside of this app. Manually\n # setting the label here is a workaround.\n self.SetMenuLabel(_PLOT_MENU_INSERT_INDEX, menu.label)\n\n\n self._current = type_", "def show(self):\n screen_copy = screen.copy()\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == pygame.BUTTON_LEFT:\n pos = event.pos\n pos = (pos[0] - self.rect.x, pos[1] - self.rect.y)\n if self.menu_btn.rect.collidepoint(pos):\n return MAIN_MENU\n elif self.restart_level_btn.rect.collidepoint(pos):\n return RESTART_LEVEL\n game_panel_group.draw(self.surface)\n screen_copy.blit(self.surface, self.rect.topleft)\n screen.blit(screen_copy, (0, 0))\n if pygame.mouse.get_focused():\n cursor.show(screen)\n pygame.display.flip()", "def draw_menu(self, context):\n if context.engine == 'RPR':\n layout = self.layout\n layout.popover('RPR_VIEW3D_PT_panel')", "def in_game_menu ( self ):\n\t\tif self.style == 'qt':\n\t\t\tp = Process( target=self.qt_in_game_menu )\n\t\t\tp.start()\n\t\t\tself.menus.append( p )", "def show_menu(self):\n curses.curs_set(0)\n self.main_menu.display()", "def menu(self):\n self.__show_about = False\n pygame.mixer.music.play(loops=-1)\n while True:\n self.draw_menu()\n init.print_about_game(self.__show_about, self.__screen)\n pygame.display.update()\n for event in pygame.event.get():\n pos = pygame.mouse.get_pos()\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n if self.__start_button.mouse_hover(pos):\n pygame.mixer.music.stop()\n pygame.mixer.music.rewind()\n return\n if self.__end_button.mouse_hover(pos):\n pygame.quit()\n quit()\n if self.__about_button.mouse_hover(pos):\n if not self.__show_about:\n self.__show_about = True\n else:\n self.__show_about = False\n if event.type == pygame.MOUSEMOTION:\n if self.__start_button.mouse_hover(pos):\n self.__start_button.color = Constants.DARK_BLUE\n else:\n self.__start_button.color = Constants.BLUE\n if self.__end_button.mouse_hover(pos):\n self.__end_button.color = Constants.RED\n else:\n self.__end_button.color = Constants.BURGUNDY\n if self.__about_button.mouse_hover(pos):\n self.__about_button.color = Constants.DARK_GREEN\n else:\n self.__about_button.color = Constants.BRIGHT_GREEN", "def draw_menu(self, screen: curses.window) -> None:\n screen.clear()\n sh, sw = screen.getmaxyx()\n self.draw_title_window(screen, 3, sw, 0, 0)\n\n bottom_win_height = sh - 2\n output_win_width = sw // 2 + 25\n input_win_width = sw - output_win_width + 1\n\n self.draw_output_window(screen, bottom_win_height - 1, output_win_width, 2, 0)\n self.draw_input_window(screen, bottom_win_height - 1, input_win_width, 2, output_win_width - 1)\n\n self.draw_status_bar(screen)", "def drawPauseMenu(self):\n\n # draws pause menu\n self.__screen.blit(self._image, self._rect)\n\n # draws buttons\n self.drawButtons()", "def draw(self, win):\n img = self.tower_imgs\n win.blit(img, (self.x - img.get_width() // 2, self.y - img.get_height() // 2))\n\n if self.selected:\n self.menu.draw(win)", "def draw_settings_menu(self):\n if 
self.settings_menu_surface is None:\n self.init_settings_menu()\n self.settings_menu_surface.fill(black)\n title = text_helper.create_text(\"Settings\", menu_fonts, 50, white)\n self.settings_menu_surface.blit(title, (center_horizontally(title, self.screen_dimensions), 50))\n for button in self.buttons:\n self.settings_menu_surface.blit(button.get_rendered_button(), button.get_position())\n self.screen.blit(self.settings_menu_surface, (0, 0))", "def draw(self):\n self.win.fill(BLACK)\n title1 = self.messenger.text_format(\"La casa\", self.font, 58, WHITE)\n title2 = self.messenger.text_format(\"de\", self.font, 48, WHITE)\n title3 = self.messenger.text_format(\"Marcelo\", self.font, 58, WHITE)\n\n title_rect1 = title1.get_rect()\n title_rect2 = title2.get_rect()\n title_rect3 = title3.get_rect()\n\n self.win.blit(title1, (WIDTH / 3.4 - (title_rect1[2] / 2), 90))\n pygame.draw.rect(self.win, RED, (238, 92, 45, 45))\n self.win.blit(title2, (WIDTH / 2 - (title_rect2[2] / 2), 95))\n self.win.blit(title3, (WIDTH / 1.4 - (title_rect3[2] / 2), 90))\n self.draw_menu(WHITE, WHITE)", "def map_menu(self):\n\n # Set the window background\n self.palette = QPalette()\n gradient = QLinearGradient(0, 0, 0, 800)\n gradient.setColorAt(0.0, QColor(227, 0, 77))\n gradient.setColorAt(1.0, QColor(255, 255, 255))\n self.palette.setBrush(QPalette.Window, QBrush(gradient))\n self.setPalette(self.palette)\n\n for item in self.mapmenu_items:\n item.show()\n for item in self.mainmenu_items:\n item.hide()", "def create_menu():", "def side_menu(self, screen):\n font = pg.font.Font('freesansbold.ttf', 30) \n text = font.render('Menu', True, (120,130,135))\n textRect = text.get_rect()\n textRect.center = (1100, 180)\n screen.blit(text, textRect)", "def draw_main(self, window, font, xpos, ypos):\n\n if not self.is_selected_main(xpos, ypos):\n # Text\n name_label = font.render(self.selected_option, 1, self.menu_colour1)\n window.blit(\n name_label,\n (\n self.x + (self.width // 2 - name_label.get_width() // 2),\n self.y + (self.height // 2 - name_label.get_height() // 2),\n ),\n )\n\n else:\n # Define label\n # Generated first so it's width can be used in the background placement calculations\n name_label = font.render(self.selected_option, 1, self.menu_colour2)\n\n # Box\n # x defined in that way so the background is centered\n # width is multiplied by 1.1 to give each option a bit more background on it's sides\n pygame.draw.rect(\n window,\n self.menu_colour1,\n (\n self.x + (self.width // 2 - name_label.get_width() * 1.1 // 2),\n self.y,\n name_label.get_width() * 1.1,\n self.height,\n ),\n 0,\n )\n # Place label\n window.blit(\n name_label,\n (\n self.x + (self.width // 2 - name_label.get_width() // 2),\n self.y + (self.height // 2 - name_label.get_height() // 2),\n ),\n )", "def draw(self, context):\n layout = self.layout\n\n pie = layout.menu_pie()\n pie.operator(\"object.view_menu\", text=\"Node Editor\", icon='NODETREE').vp = \"NODE_EDITOR\"\n pie.operator(\"object.view_menu\", text=\"UV Image Editor\", icon='IMAGE_COL').vp = \"IMAGE_EDITOR\"\n pie.operator(\"object.view_menu\", text=\"Video Sequece Editor\", icon='SEQUENCE').vp = \"SEQUENCE_EDITOR\"\n pie.operator(\"object.view_menu\", text=\"Movie Clip Editor\", icon='CLIP').vp = \"CLIP_EDITOR\"", "def draw( self, screen, game_self):\r\n\r\n if self.is_visible == False: return\r\n \r\n window.Window.draw(self, screen)\r\n\r\n screen.blit( self.top_font, (self.left + 20, self.top+20))\r\n\r\n selected = game_self.shop.shop_window.menu\r\n\r\n #show what is the 
category\r\n if selected == self.SWORD:\r\n category_font = self.menu_font.render( u\"剣\", True, COLOR_WHITE)\r\n if selected == self.KATANA:\r\n category_font = self.menu_font.render( u\"刀\", True, COLOR_WHITE)\r\n if selected == self.BLUNT:\r\n category_font = self.menu_font.render( u\"鈍器\", True, COLOR_WHITE)\r\n if selected == self.GUN:\r\n category_font = self.menu_font.render( u\"銃\", True, COLOR_WHITE) \r\n if selected == self.THROW:\r\n category_font = self.menu_font.render( u\"投擲\", True, COLOR_WHITE) \r\n\r\n if selected == self.SHIELD:\r\n category_font = self.menu_font.render( u\"盾\", True, COLOR_WHITE)\r\n if selected == self.ARMOR:\r\n category_font = self.menu_font.render( u\"鎧\", True, COLOR_WHITE)\r\n if selected == self.HELMET:\r\n category_font = self.menu_font.render( u\"兜\", True, COLOR_WHITE)\r\n if selected == self.GAUNTLET:\r\n category_font = self.menu_font.render( u\"篭手\", True, COLOR_WHITE)\r\n if selected == self.ACCESSORY:\r\n category_font = self.menu_font.render( u\"アクセサリー\", True, COLOR_WHITE)\r\n\r\n if selected == self.ITEM:\r\n category_font = self.menu_font.render( u\"アイテム\", True, COLOR_WHITE)\r\n \r\n \r\n screen.blit( category_font, (self.left + 20 + self.top_font.get_width(), self.top+20))\r\n\r\n\r\n #store the item in the shop and quantity of it\r\n item_data = game_self.item_data\r\n \r\n #category item is the array of selected category items\r\n self.category_item = game_self.shop.stock[selected]\r\n\r\n #draw the box on item selected\r\n if self.category_item != []:\r\n #draws rectangle on the menu item size of rectangle has width of window rectangle - edge_length*2\r\n #the height depends on the size of font\r\n pygame.draw.rect(screen, COLOR_GLAY, Rect( self.left+4, self.top+55 + 30*self.menu,(self.right-self.left)-8,30), 0)\r\n\r\n #draws the item 10 at a time in the page\r\n i = 0\r\n for item in self.category_item[self.page*10:(self.page+1)*10]:\r\n item_font = item_data[item.id][0].strip(\"\\\"\")\r\n item_font = unicode(item_font, encoding=\"sjis\")\r\n item_font = self.menu_font.render( item_font, True, COLOR_WHITE)\r\n screen.blit( item_font, (self.left + 20, self.top+60+i*30))\r\n cost_font = self.menu_font.render( item_data[item.id][2] + \"TG\", True, COLOR_WHITE)\r\n screen.blit( cost_font, (self.right - 20 - cost_font.get_width(), self.top+60+i*30))\r\n \r\n i+=1\r\n\r\n\r\n self.character_select.draw( screen, game_self)", "def draw_pause_menu(self):\n if self.pause_menu_surface is None:\n self.init_pause_menu()\n self.screen.blit(self.pause_menu_surface, (0, 0))\n title = text_helper.create_text(\"Pause\", menu_fonts, 50, white)\n self.screen.blit(title, (center_horizontally(title, self.screen_dimensions), 50))\n for button in self.buttons:\n self.screen.blit(button.get_rendered_button(), button.get_position())", "def render(self):\n\t\tself._menu.refresh_population()\n\t\tself._menu.blit_and_update()", "def _render(self):\n self._renderer.render_menu()\n pg.display.update()", "def __showMenuCheck(self):\n if not self.__showMenuActions:\n self.__showMenuUpdate()", "def draw(self):\n if (libt.map_is_in_fov(self.handler.fov_map, self.x, self.y) or \n self.handler.world.map[self.x][self.y].seen and self.visible_in_fog):\n libt.console_set_default_foreground(self.handler.game_map, self.colour)\n libt.console_put_char(self.handler.game_map, self.x, self.y, \n self.char, libt.BKGND_NONE)", "def main_menu(self):\n\n # Set the window background\n self.palette = QPalette()\n self.pixmap = QPixmap('./pictures/menu_cat.png').scaled(860, 
640)\n self.palette.setBrush(QPalette.Background, QBrush(self.pixmap))\n self.setPalette(self.palette)\n\n for item in self.mainmenu_items:\n item.show()\n for item in self.mapmenu_items:\n item.hide()", "def draw():", "def draw(self):\n self.strip.show()", "def addMenu():\n mb.addAction(actionAccessories)\n actionAccessories.setVisible(True)", "def initMenu(self):\n \n gameDisplay = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n\n\n\n stillOn = True\n\n clock = pygame.time.Clock()\n\n def close_fun():\n \n main_menu.disable()\n \n\n def mainmenu_background():\n \"\"\"Background color of the main menu, on this function user can plot\n images, play sounds, etc.\"\"\"\n gameDisplay.fill((40, 0, 40))\n \n\n\n\n\n main_menu = pygameMenu.Menu(gameDisplay,\n bgfun=mainmenu_background,\n color_selected=COLOR_WHITE,\n font=pygameMenu.fonts.FONT_BEBAS,\n font_color=COLOR_BLACK,\n font_size=30,\n menu_alpha=100,\n menu_color=(40,0,40),\n menu_height=600,\n menu_width=800,\n onclose=mainmenu_background,\n option_shadow=False,\n title='RPmG',\n window_height=600,\n window_width=800\n )\n \n\n main_menu.add_option('Save the Game', self.saveGame)\n main_menu.add_option('Close: Pressasdfg esc', PYGAME_MENU_CLOSE)\n\n looping = True\n while looping:\n\n # Tick\n clock.tick(60)\n\n # Application events\n events = pygame.event.get()\n for event in events:\n if event.type == QUIT:\n exit()\n\n # Main menu\n main_menu.mainloop(events)\n looping = False\n\n\n # Flip surface\n pygame.display.flip()", "def startMenu():\n \n screenInf=screenInfo()\n pygame.init()\n screen = pygame.display.set_mode(screenInf.SCREEN_SIZE, FULLSCREEN, 32)\n pygame.mouse.set_visible(False)\n splash = SplashMenu(screen)\n menu = IntroMenu(screen) #####Create IntroMenu objectu\n \n running=True\n \n splash.drawSplashMenu() ####Splash only drawn once at beginning of game\n \n while running:\n menu.drawIntroMenu(screen) ####Draws menu to screen, controls game during this time\n currentGame = menu.getGameInfo()\n if(currentGame == None):\n exit()", "def show_main_menu(self):\n while not self.__play_game and self.__just_launched:\n self.clock.tick(60)\n self.frame_number += 1\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.__just_launched = False\n elif (event.type == pygame.KEYDOWN) or (event.type == pygame.MOUSEBUTTONDOWN):\n self.__play_game = True\n break\n\n self.background.to_canvas(canvas=self.canvas)\n\n self.make_pipes(self.menu_pipes)\n for pipe_set in self.menu_pipes:\n pipe_set.to_canvas(canvas=self.canvas)\n pipe_set.scroll(scroll_speed=2)\n\n # In case the current pipe (pipe in front of the bird) goes off-screen (i.e. 
x-coordinate <= 0),\n # remove the PipeSet (pipe) object from the PipeSet collection so the collection won't grow too much\n if self.menu_pipes[0].x_coordinate <= self.pipes[0].pipe_width:\n self.menu_pipes.pop(0)\n\n self.ground.to_canvas(canvas=self.canvas)\n\n self.main_menu_screen.title_to_canvas(canvas=self.canvas)\n\n if self.frame_number <= 30:\n self.main_menu_screen.prompt_to_canvas(canvas=self.canvas)\n\n pygame.display.flip()\n\n print(f\"FPS: {self.clock.get_fps()}\")", "def draw( self, screen, character):\r\n\r\n if self.is_visible == False: return\r\n \r\n window.Window.draw(self, screen)\r\n\r\n name_font = self.menu_font.render( character.name, True, COLOR_WHITE)\r\n\r\n screen.blit( name_font, (self.left+20, self.top+20))\r\n screen.blit( self.top_font, (self.left+20+name_font.get_width(), self.top+20))\r\n\r\n #draw the box on item selected\r\n if character.items != []:\r\n #draws rectangle on the menu item size of rectangle has width of window rectangle - edge_length*2\r\n #the height depends on the size of font\r\n pygame.draw.rect(screen, COLOR_GLAY, Rect( self.left+4, self.top+55 + 30*self.menu,(self.right-self.left)-8,30), 0)\r\n\r\n\r\n i = 0\r\n for item in character.items:\r\n item_font = self.menu_font.render( item.name, True, COLOR_WHITE)\r\n screen.blit ( item_font, (self.left+20, self.top+60+i*30))\r\n cost_font = self.menu_font.render( str(item.price/2) + \"TG\", True, COLOR_WHITE)\r\n screen.blit( cost_font, (self.right-20 - cost_font.get_width(), self.top+60+i*30))\r\n\r\n i += 1\r\n\r\n\r\n self.sold_item_window.draw(screen)", "def menu(self):\n from mainmenu import Menu\n gm = Menu(self.screen)\n gm.run()", "def visible(self, show):", "def menu_handler(menu,fileout_path):\r\n \r\n image = Image.open(\"icons/menu_template.jpg\")\r\n draw = ImageDraw.Draw (image)\r\n \r\n if menu is None: \r\n \r\n TITLE_X_OFFSET=0.05*image.size[0]\r\n TITLE_Y_OFFSET=0.25*image.size[1]\r\n \r\n huge_font=ImageFont.truetype(\"fonts\\Ariblk.ttf\",int(image.size[0]*HUGE_FONT/1000)) \r\n\r\n draw.text ( (TITLE_X_OFFSET,TITLE_Y_OFFSET), u'Ementa não disponível', fill=BLACK,font=huge_font)\r\n \r\n else:\r\n\r\n TITLE_X_OFFSET=0.05*image.size[0]\r\n MEAL_X_OFFSET=0.07*image.size[0]\r\n MENU_X_OFFSET=0.10*image.size[0]\r\n DISH_X_OFFSET=0.15*image.size[0]\r\n\r\n TITLE_Y_OFFSET=0.12*image.size[1]\r\n MEAL_Y_OFFSET=0.10*image.size[1]\r\n MENU_Y_OFFSET=0.08*image.size[1]\r\n DISH_Y_OFFSET=0.06*image.size[1]\r\n \r\n \r\n large_font=ImageFont.truetype(\"fonts\\Ariblk.ttf\",int(image.size[0]*LARGE_FONT/1000))\r\n medium_font=ImageFont.truetype(\"fonts\\Ariblk.ttf\",int(image.size[0]*MEDIUM_FONT/1000))\r\n small_font=ImageFont.truetype(\"fonts\\Ariblk.ttf\",int(image.size[0]*SMALL_FONT/1000))\r\n\r\n large_font_bold=ImageFont.truetype(\"fonts\\Ariblk.ttf\",int(image.size[0]*LARGE_FONT/1000))\r\n medium_font_bold=ImageFont.truetype(\"fonts\\Ariblk.ttf\",int(image.size[0]*MEDIUM_FONT/1000))\r\n small_font_bold=ImageFont.truetype(\"fonts\\Ariblk.ttf\",int(image.size[0]*SMALL_FONT/1000))\r\n\r\n y_offset=0\r\n \r\n y_offset+=TITLE_Y_OFFSET\r\n draw.text ( (TITLE_X_OFFSET,y_offset), u'Ementa', fill=BLACK,font=large_font)\r\n \r\n if menu.has_key(u'Almoço'):\r\n y_offset+=MEAL_Y_OFFSET\r\n draw.text ( (MEAL_X_OFFSET,y_offset), u'Almoço', fill=BLACK,font=medium_font_bold)\r\n \r\n\r\n if menu[u'Almoço'].has_key(u'Menú Tradicional'): \r\n y_offset+=MENU_Y_OFFSET \r\n draw.text ( (MENU_X_OFFSET,y_offset), u'- Menú Tradicional', fill= BLACK,font=small_font) \r\n \r\n for elem in 
menu[u'Almoço'][u'Menú Tradicional']:\r\n y_offset+=DISH_Y_OFFSET\r\n draw.text ( (DISH_X_OFFSET,y_offset), u'\\u2022 ' + elem, fill= BLACK,font=small_font) \r\n \r\n \r\n if menu[u'Almoço'].has_key(u'Menú Macrobiótica'): \r\n y_offset+=MENU_Y_OFFSET \r\n draw.text ( (MENU_X_OFFSET,y_offset), u'- Menú Macrobiótica', fill= BLACK,font=small_font) \r\n \r\n for elem in menu[u'Almoço'][u'Menú Macrobiótica']:\r\n y_offset+=DISH_Y_OFFSET\r\n draw.text ( (DISH_X_OFFSET,y_offset), u'\\u2022 ' + elem, fill= BLACK,font=small_font) \r\n \r\n if menu.has_key(u'Jantar'): \r\n y_offset+=MEAL_Y_OFFSET\r\n draw.text ( (MEAL_X_OFFSET,y_offset), 'Jantar', fill= BLACK,font=medium_font_bold) \r\n \r\n if menu[u'Jantar'].has_key(u'Menú Tradicional'): \r\n y_offset+=MENU_Y_OFFSET \r\n draw.text ( (MENU_X_OFFSET,y_offset), u'- Menú Tradicional', fill= BLACK,font=small_font) \r\n \r\n for elem in menu[u'Jantar'][u'Menú Tradicional']:\r\n y_offset+=DISH_Y_OFFSET\r\n draw.text ( (DISH_X_OFFSET,y_offset), u'\\u2022 ' + elem, fill= BLACK,font=small_font) \r\n \r\n \r\n image.save(join(fileout_path,'menu_file.jpg'))", "def draw(self):\n if context.click():\n self.place()", "def dumbmenu(screen, menu, x_pos = 100, y_pos = 100, font = None,\n size = 70, distance = 1.4, fgcolor = (255,255,255),\n cursorcolor = (255,0,0), exitAllowed = True):\n\n\n\t# Draw the Menupoints\n\tpygame.font.init()\n\tif font == None:\n\t\tmyfont = pygame.font.Font(None, size)\n\telse:\n\t\tmyfont = pygame.font.SysFont(font, size)\n\tcursorpos = 0\n\trenderWithChars = False\n\tfor i in menu:\n\t\tif renderWithChars == False:\n\t\t\ttext = myfont.render(str(cursorpos + 1)+\". \" + i,\n\t\t\t\tTrue, fgcolor)\n\t\telse:\n\t\t\ttext = myfont.render(chr(char)+\". \" + i,\n\t\t\t\tTrue, fgcolor)\n\t\t\tchar += 1\n\t\ttextrect = text.get_rect()\n\t\ttextrect = textrect.move(x_pos, \n\t\t (size // distance * cursorpos) + y_pos)\n\t\tscreen.blit(text, textrect)\n\t\tpygame.display.update(textrect)\n\t\tcursorpos += 1\n\t\tif cursorpos == 9:\n\t\t\trenderWithChars = True\n\t\t\tchar = 65\n\n\t# Draw the \">\", the Cursor\n\tcursorpos = 0\n\tcursor = myfont.render(\">\", True, cursorcolor)\n\tcursorrect = cursor.get_rect()\n\tcursorrect = cursorrect.move(x_pos - (size // distance),\n\t (size // distance * cursorpos) + y_pos)\n\n\t# The whole While-loop takes care to show the Cursor, move the\n\t# Cursor and getting the Keys (1-9 and A-Z) to work...\n\tArrowPressed = True\n\texitMenu = False\n\tclock = pygame.time.Clock()\n\tfiller = pygame.Surface.copy(screen)\n\tfillerrect = filler.get_rect()\n\twhile True:\n\t\tclock.tick(30)\n\t\tif ArrowPressed == True:\n\t\t\tscreen.blit(filler, fillerrect)\n\t\t\tpygame.display.update(cursorrect)\n\t\t\tcursorrect = cursor.get_rect()\n\t\t\tcursorrect = cursorrect.move(x_pos - (size // distance),\n\t\t\t (size // distance * cursorpos) + y_pos)\n\t\t\tscreen.blit(cursor, cursorrect)\n\t\t\tpygame.display.update(cursorrect)\n\t\t\tArrowPressed = False\n\t\tif exitMenu == True:\n\t\t\tbreak\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\treturn -1\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_ESCAPE and exitAllowed == True:\n\t\t\t\t\tif cursorpos == len(menu) - 1:\n\t\t\t\t\t\texitMenu = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tcursorpos = len(menu) - 1; ArrowPressed = True\n\n\n\t\t\t\t# This Section is huge and ugly, I know... 
But I don't\n\t\t\t\t# know a better method for this^^\n\t\t\t\tif event.key == pygame.K_1:\n\t\t\t\t\tcursorpos = 0; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_2 and len(menu) >= 2:\n\t\t\t\t\tcursorpos = 1; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_3 and len(menu) >= 3:\n\t\t\t\t\tcursorpos = 2; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_4 and len(menu) >= 4:\n\t\t\t\t\tcursorpos = 3; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_5 and len(menu) >= 5:\n\t\t\t\t\tcursorpos = 4; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_6 and len(menu) >= 6:\n\t\t\t\t\tcursorpos = 5; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_7 and len(menu) >= 7:\n\t\t\t\t\tcursorpos = 6; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_8 and len(menu) >= 8:\n\t\t\t\t\tcursorpos = 7; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_9 and len(menu) >= 9:\n\t\t\t\t\tcursorpos = 8; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_a and len(menu) >= 10:\n\t\t\t\t\tcursorpos = 9; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_b and len(menu) >= 11:\n\t\t\t\t\tcursorpos = 10; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_c and len(menu) >= 12:\n\t\t\t\t\tcursorpos = 11; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_d and len(menu) >= 13:\n\t\t\t\t\tcursorpos = 12; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_e and len(menu) >= 14:\n\t\t\t\t\tcursorpos = 13; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_f and len(menu) >= 15:\n\t\t\t\t\tcursorpos = 14; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_g and len(menu) >= 16:\n\t\t\t\t\tcursorpos = 15; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_h and len(menu) >= 17:\n\t\t\t\t\tcursorpos = 16; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_i and len(menu) >= 18:\n\t\t\t\t\tcursorpos = 17; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_j and len(menu) >= 19:\n\t\t\t\t\tcursorpos = 18; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_k and len(menu) >= 20:\n\t\t\t\t\tcursorpos = 19; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_l and len(menu) >= 21:\n\t\t\t\t\tcursorpos = 20; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_m and len(menu) >= 22:\n\t\t\t\t\tcursorpos = 21; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_n and len(menu) >= 23:\n\t\t\t\t\tcursorpos = 22; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_o and len(menu) >= 24:\n\t\t\t\t\tcursorpos = 23; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_p and len(menu) >= 25:\n\t\t\t\t\tcursorpos = 24; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_q and len(menu) >= 26:\n\t\t\t\t\tcursorpos = 25; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_r and len(menu) >= 27:\n\t\t\t\t\tcursorpos = 26; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_s and len(menu) >= 28:\n\t\t\t\t\tcursorpos = 27; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_t and len(menu) >= 29:\n\t\t\t\t\tcursorpos = 28; 
ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_u and len(menu) >= 30:\n\t\t\t\t\tcursorpos = 29; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_v and len(menu) >= 31:\n\t\t\t\t\tcursorpos = 30; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_w and len(menu) >= 32:\n\t\t\t\t\tcursorpos = 31; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_x and len(menu) >= 33:\n\t\t\t\t\tcursorpos = 32; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_y and len(menu) >= 34:\n\t\t\t\t\tcursorpos = 33; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_z and len(menu) >= 35:\n\t\t\t\t\tcursorpos = 34; ArrowPressed = True; exitMenu = True\n\t\t\t\telif event.key == pygame.K_UP:\n\t\t\t\t\tArrowPressed = True\n\t\t\t\t\tif cursorpos == 0:\n\t\t\t\t\t\tcursorpos = len(menu) - 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tcursorpos -= 1\n\t\t\t\telif event.key == pygame.K_DOWN:\n\t\t\t\t\tArrowPressed = True\n\t\t\t\t\tif cursorpos == len(menu) - 1:\n\t\t\t\t\t\tcursorpos = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tcursorpos += 1\n\t\t\t\telif event.key == pygame.K_KP_ENTER or \\\n\t\t\t\t event.key == pygame.K_RETURN:\n\t\t\t\t\t\t\texitMenu = True\n\t\n\treturn cursorpos", "def __init__(self, MENUWIDTH, MENUHEIGHT):\n \n self._menu_items = []\n self.menu_Text = []\n #Background\n self.menuRect = pygame.Rect((0, 0), (MENUWIDTH, MENUHEIGHT))", "def show_hud(self):\r\n self.health.draw(self.screen)\r\n self.ammo.draw(self.screen)\r\n self.shield.draw(self.screen)\r\n if self.stats.stage in self.ai_settings.boss_stages:\r\n self.boss_health.draw(self.screen)", "def create(self):\n # Positioning background and pointer indicator for main menu\n self.surface.blit(self.main_menu_background, (0, 0))\n self.surface.blit(self.main_menu_greets, self.main_menu_greets_position)\n self.show_mouse_position_with_px()\n self.main_menu_buttons()", "def rightmakevisible(self, pos):\n pass", "def draw(self, draw_surface):\n\n # The menu frame and how many surf (frame that appears in the middle\n # of the bottom of the screen).\n draw_surface.blit(self.menu_frame, (176, 112))\n draw_surface.blit(self.how_many_surf, (40, 115))\n\n if self.confirm_toss_response_dialogue is None and \\\n self.threw_away_dialogue is None:\n self.quantity_cursor.draw(draw_surface)\n\n # If on the trow away dialogue we don't need to draw anything else (it\n # is taken care of in the how many surf). 
Return so that cursor and\n # yes no surf are not drawn.\n if self.threw_away_dialogue is not None:\n return\n\n elif self.confirm_toss_response_dialogue is not None:\n draw_surface.blit(self.yes_no_surf, (195, 127))\n self.cursor.draw(draw_surface)", "def is_visible(self):", "def set_visible(self):\n\t\tself.hide()\n\t\tself.__sys_tray_icon.setVisible(True)", "def draw(self):\n if not self.pressed:\n #draw info prompt in room\n arcade.draw_rectangle_filled(self.center_x, self.center_y, 20, 20, arcade.color.ANTIQUE_BRASS)\n arcade.draw_text(\"?\", self.center_x, self.center_y, arcade.color.BLACK, anchor_x=\"center\", anchor_y=\"center\")\n else:\n #draw info to top of screen when clicked\n arcade.draw_text(self.text, 10, settings.HEIGHT - 10, arcade.color.BLACK, anchor_x=\"left\", anchor_y=\"top\")", "def _draw_main_menu(client, commands):\n client.send(\"\\n\")\n client.send_wrapped(\"~=\" * 20)\n client.send(\" \" * 16 +\"Welcome to \" + PRODUCT_NAME + \"\\n\")\n client.send_wrapped(\"~=\" * 20)\n client.send(\"\\n\")\n client.send(\" | \".join(commands))\n client.send(\"\\n\")", "def show_movement(show_this_menu, flash):\n if show_this_menu:\n for i in range(0, 6):\n if i != flash:\n mvaddstr(i + 7, 1, MOVES[i], color_pair(OPTIONS_COLOUR) | A_BOLD)\n else:\n mvaddstr(i + 7, 1, MOVES[i], color_pair(RED) | A_BOLD)\n refresh()\n else:\n for i in range(0, 6):\n mvaddstr(i + 7, 1, SHORT_BLANK, color_pair(BLACK) | A_BOLD)", "def redraw_menu(self, event):\n self.appInit()\n self.redraw()", "def draw(self, draw_surface):\n draw_surface.blit(self.menu_frame, (140, 71))\n draw_surface.blit(self.cost_surf, end_at(self.cost_surf, (202, 87)))\n self.quantity_cursor.draw(draw_surface)", "def drawButtons(self):\n self.__pausedTitle.draw(self.__screen)\n self.__exitGameButton.draw(self.__screen)\n self.__resumeButton.draw(self.__screen)\n self.__mainMenuButton.draw(self.__screen)", "def mainmenu_background():\n gameDisplay.fill((40, 0, 40))", "def mainmenu_background():\n gameDisplay.fill((40, 0, 40))", "def draw_menu(self, items):\n\n self.count = len(items) - 1\n while True:\n if self.flag >= 1:\n if items[self.index] == str(BackButton.EXIT.name):\n print(\"exiting from system\")\n sys.exit()\n else:\n\n return items[self.index]\n\n for x in range(0, self.count+1):\n if x == self.index:\n print(Color.B_LightGray + Color.F_Black + items[x] + Base.END)\n elif x == self.count:\n print(Color.F_Red + items[x] + Base.END)\n else:\n print(items[x])\n with Listener(\n on_press=self.on_press\n ) as listener:\n listener.join()\n\n os.system('clear')\n\n # providing if statement in last to emulate do while loop\n if not always_true():\n break", "def draw_labels(self):\n x = PygameUI.Label('Inactive Events')\n x.frame = pygame.Rect(4, 4, 150, 30)\n self.scene.add_child(x)\n \n x = PygameUI.Label('Active Events')\n x.frame = pygame.Rect(Menu.scene.frame.w-150, 4, 150, 30)\n self.scene.add_child(x)", "def loadMenu(self):\r\n show_empty_root_items = pos.config['menu', 'show_empty_root_items']\r\n show_disabled_items = pos.config['menu', 'show_disabled_items']\r\n self.mainToolbook.AssignImageList(pos.menu.il)\r\n \r\n for root in pos.menu.main.items:\r\n if not root.enabled and not show_disabled_items:\r\n continue\r\n enabled_children = [i for i in root.children if i.enabled]\r\n if show_disabled_items:\r\n children = root.children\r\n else:\r\n children = enabled_children\r\n # Hide empty menu root items\r\n if len(children) == 0 and not show_empty_root_items:\r\n continue\r\n page = 
self.getToolbookPage(children)\r\n self.mainToolbook.AddPage(imageId=root.image, page=page, select=False, text=root.label)\r\n page.Enable(root.enabled)# and len(enabled_children) != 0)\r", "def menu_screen(win):\n\tpass", "def selector(self):\n if self.selectedUnit:\n if not self.map.hasUnselectedUnitAt(self.pos):\n self.menu = Menu.Menu([], MENU_POSITION)\n #self.menuGroup.add(self.menu)\n self.selectedUnit.setNeighbors(self.map.getNeighbors(self.selectedUnit))\n if self.selectedUnit.hasUnfriendlyNeighbors():\n self.menu.add(Menu.MenuComponent(\" Attack\", self.startAttackMode))\n if self.selectedUnit.canCapture(self.pos):\n self.menu.add(Menu.MenuComponent(\" Capture\", lambda: self.capture(self.selectedUnit, self.pos)))\n self.menu.add(Menu.MenuComponent(\" Wait\", self.deselectUnit))\n self.menu.add(Menu.MenuComponent(\" Cancel\", self.cancelMove))\n self.menuMode = True\n else:\n self.selectSpace()", "def showRightClickMenu(self,pos):\n\t\tprint('bStackWidget.showRightClickMenu()')\n\t\tmenu = QtWidgets.QMenu()\n\t\t#self.menu = QtWidgets.QMenu()\n\n\t\tnumChannels = self.mySimpleStack.numChannels # number of channels in stack\n\t\tmaxNumChannels = self.mySimpleStack.maxNumChannels\n\t\t#actions = ['Channel 1', 'Channel 2', 'Channel 3', 'RGB', 'Channel 1 Mask', 'Channel 2 Mask', 'Channel 3 Mask']\n\t\tprint(' showRightClickMenu() numChannels:', numChannels, 'maxNumChannels:', maxNumChannels)\n\t\tactionsList = []\n\t\tisEnabledList = []\n\t\tisCheckedList = []\n\t\t# abb oct 2020, maybe put these back in\n\t\t'''\n\t\tfor i in range(numChannels):\n\t\t\tchanNumber = i + 1\n\t\t\tactionsList.append(f'Channel {chanNumber}')\n\t\t\tisEnabled = self.mySimpleStack.hasChannelLoaded(chanNumber)\n\t\t\tisEnabledList.append(isEnabled)\n\t\t\tisChecked = self.getStackView().displayStateDict['displayThisStack'] == chanNumber\n\t\t\tisCheckedList.append(isChecked)\n\t\t'''\n\t\tfor i in range(numChannels):\n\t\t\tchanNumber = i + 1\n\t\t\tactionsList.append(f'Channel {chanNumber} Mask')\n\t\t\tactualChanNumber = maxNumChannels + i + 1\n\t\t\tisEnabled = self.mySimpleStack.hasChannelLoaded(actualChanNumber)\n\t\t\tisEnabledList.append(isEnabled)\n\t\t\tisChecked = self.getStackView().displayStateDict['displayThisStack'] == actualChanNumber\n\t\t\tisCheckedList.append(isChecked)\n\t\t'''\n\t\tfor i in range(numChannels):\n\t\t\tchanNumber = i + 1\n\t\t\tactionsList.append(f'Channel {chanNumber} Skel')\n\t\t\tactualChanNumber = 2 * maxNumChannels + i + 1\n\t\t\tisEnabled = self.mySimpleStack.hasChannelLoaded(actualChanNumber)\n\t\t\tisEnabledList.append(isEnabled)\n\t\t\tisChecked = self.getStackView().displayStateDict['displayThisStack'] == actualChanNumber\n\t\t\tisCheckedList.append(isChecked)\n\t\t'''\n\n\t\t# abb oct 2020, maybe put this back in ???\n\t\t'''\n\t\tif numChannels>1:\n\t\t\tactionsList.append('RGB')\n\t\t\tisEnabledList.append(True)\n\t\t\tisChecked = self.getStackView().displayStateDict['displayThisStack'] == 'rgb' # lower case !!!\n\t\t\tisCheckedList.append(isChecked)\n\t\t'''\n\n\t\tfor i, actionStr in enumerate(actionsList):\n\t\t\t# make an action\n\t\t\tcurrentAction = QtWidgets.QAction(actionStr, self, checkable=True)\n\t\t\t# decide if it is checked\n\t\t\tisEnabled = isEnabledList[i]\n\t\t\tisChecked = self.getStackView().displayStateDict['displayThisStack'] == i+1\n\t\t\tisChecked = isCheckedList[i]\n\n\t\t\tcurrentAction.setEnabled(isEnabled)\n\t\t\tcurrentAction.setChecked(isChecked)\n\t\t\t# add to menu\n\t\t\tmenuAction = 
menu.addAction(currentAction)\n\n\t\t#\n\t\t# do again for edt\n\t\tedtIdx = 3 # (raw==0, mask==1, skel==2, edt==3)\n\t\tactionsList = []\n\t\tisEnabledList = []\n\t\tisCheckedList = []\n\t\tfor i in range(numChannels):\n\t\t\tchanNumber = i + 1\n\t\t\tactionsList.append(f'Channel {chanNumber} EDT')\n\t\t\tactualChanNumber = (maxNumChannels * edtIdx) + i + 1\n\t\t\tisEnabled = self.mySimpleStack.hasChannelLoaded(actualChanNumber)\n\t\t\tprint(' edt actualChanNumber:', actualChanNumber, 'isEnabled:', isEnabled)\n\t\t\tisEnabledList.append(isEnabled)\n\t\t\tisChecked = self.getStackView().displayStateDict['displayThisStack'] == actualChanNumber\n\t\t\tisCheckedList.append(isChecked)\n\t\tfor i, actionStr in enumerate(actionsList):\n\t\t\t# make an action\n\t\t\tcurrentAction = QtWidgets.QAction(actionStr, self, checkable=True)\n\t\t\t# decide if it is checked\n\t\t\tisEnabled = isEnabledList[i]\n\t\t\tisChecked = self.getStackView().displayStateDict['displayThisStack'] == i+1\n\t\t\tisChecked = isCheckedList[i]\n\n\t\t\tcurrentAction.setEnabled(isEnabled)\n\t\t\tcurrentAction.setChecked(isChecked)\n\t\t\t# add to menu\n\t\t\tmenuAction = menu.addAction(currentAction)\n\n\t\t#\n\t\tmenu.addSeparator()\n\n\t\t#\n\t\t# view\n\t\t# abb oct 2020, maybe put these back in ???\n\t\t#actions = ['Image', 'Sliding Z', 'Nodes', 'Edges']\n\t\tactions = ['Image']\n\t\tfor actionStr in actions:\n\t\t\t# make an action\n\t\t\tcurrentAction = QtWidgets.QAction(actionStr, self, checkable=True)\n\t\t\t# decide if it is checked\n\t\t\tisChecked = False\n\t\t\tif actionStr == 'Image':\n\t\t\t\tisChecked = self.getStackView().displayStateDict['showImage']\n\t\t\telif actionStr == 'Sliding Z':\n\t\t\t\tisChecked = self.getStackView().displayStateDict['displaySlidingZ']\n\t\t\telif actionStr == 'Nodes':\n\t\t\t\tisChecked = self.getStackView().displayStateDict['showNodes']\n\t\t\telif actionStr == 'Edges':\n\t\t\t\tisChecked = self.getStackView().displayStateDict['showEdges']\n\t\t\tcurrentAction.setChecked(isChecked)\n\t\t\tcurrentAction.triggered.connect(self.actionHandler)\n\t\t\t# add to menu\n\t\t\t#menuAction = self.menu.addAction(currentAction)\n\t\t\tmenuAction = menu.addAction(currentAction)\n\n\t\tmenu.addSeparator()\n\n\t\t#\n\t\t# panels\n\n\t\t'''\n\t\tannotationsAction = QtWidgets.QAction('Left Toolbar', self, checkable=True)\n\t\tannotationsAction.setChecked(self.options['Panels']['showLeftToolbar'])\n\t\t#annotationsAction.setShortcuts('[')\n\t\ttmpMenuAction = menu.addAction(annotationsAction)\n\t\t'''\n\n\t\t'''\n\t\t# nodes\n\t\tannotationsAction = QtWidgets.QAction('Node List', self, checkable=True)\n\t\tannotationsAction.setChecked(self.options['Panels']['showNodeList'])\n\t\ttmpMenuAction = menu.addAction(annotationsAction)\n\t\t'''\n\n\t\t'''\n\t\t# edges\n\t\tannotationsAction = QtWidgets.QAction('Edge List', self, checkable=True)\n\t\tannotationsAction.setChecked(self.options['Panels']['showEdgeList'])\n\t\ttmpMenuAction = menu.addAction(annotationsAction)\n\t\t'''\n\n\t\t'''\n\t\t# search\n\t\tannotationsAction = QtWidgets.QAction('Search List', self, checkable=True)\n\t\tannotationsAction.setChecked(self.options['Panels']['showSearch'])\n\t\ttmpMenuAction = menu.addAction(annotationsAction)\n\t\t'''\n\n\t\t'''\n\t\t# annotations\n\t\tannotationsAction = QtWidgets.QAction('Annotation List', self, checkable=True)\n\t\tannotationsAction.setChecked(self.options['Panels']['showAnnotations'])\n\t\ttmpMenuAction = menu.addAction(annotationsAction)\n\t\t'''\n\n\t\t'''\n\t\t# 
contrast\n\t\tcontrastAction = QtWidgets.QAction('Contrast Panel', self, checkable=True)\n\t\tcontrastAction.setChecked(self.options['Panels']['showContrast'])\n\t\ttmpMenuAction = menu.addAction(contrastAction)\n\t\t'''\n\n\t\t'''\n\t\t# status toolbar\n\t\tannotationsAction = QtWidgets.QAction('Status Panel', self, checkable=True)\n\t\tannotationsAction.setChecked(self.options['Panels']['showStatus'])\n\t\ttmpMenuAction = menu.addAction(annotationsAction)\n\t\t'''\n\n\t\t'''\n\t\t# line profile toolbar\n\t\tannotationsAction = QtWidgets.QAction('Line Profile Panel', self, checkable=True)\n\t\tannotationsAction.setChecked(self.options['Panels']['showLineProfile'])\n\t\ttmpMenuAction = menu.addAction(annotationsAction)\n\t\t'''\n\n\t\t# napari\n\t\tmenu.addSeparator()\n\t\tnapariAction = QtWidgets.QAction('Napari', self, checkable=False)\n\t\ttmpMenuAction = menu.addAction(napariAction)\n\n\t\tmenu.addSeparator()\n\t\t# make square\n\t\tmakeSquareAction = QtWidgets.QAction('Square', self, checkable=True)\n\t\tmakeSquareAction.setChecked(False)\n\t\ttmpMenuAction = menu.addAction(makeSquareAction)\n\n\t\tmenu.addSeparator()\n\n\t\t# save image\n\t\tsaveImageAction = QtWidgets.QAction('Save Image', self, checkable=False)\n\t\ttmpMenuAction = menu.addAction(saveImageAction)\n\n\t\t# save movie\n\t\tsaveMovieAction = QtWidgets.QAction('Save Movie', self, checkable=False)\n\t\ttmpMenuAction = menu.addAction(saveMovieAction)\n\n\t\t# options\n\t\t'''\n\t\tmenu.addSeparator()\n\t\toptionsAction = QtWidgets.QAction('Options', self, checkable=False)\n\t\ttmpMenuAction = menu.addAction(optionsAction)\n\t\t'''\n\n\t\t# refresh tracing\n\t\tmenu.addSeparator()\n\t\trefeshAction = QtWidgets.QAction('Refresh', self, checkable=False)\n\t\ttmpMenuAction = menu.addAction(refeshAction)\n\n\t\t#\n\t\t# edits\n\t\tself.addEditMenu(menu)\n\n\t\t#\n\t\t# get the action selection from user\n\n\t\tprint('=== bStackWidget.showRightClickMenu()')\n\t\t# was this\n\t\tuserAction = menu.exec_(self.mapToGlobal(pos))\n\t\t# now this\n\t\t'''\n\t\tself.menu.move(self.mapToGlobal(pos))\n\t\tself.menu.show()\n\t\t'''\n\n\t\t#userAction = None\n\t\tif userAction is None:\n\t\t\t# abort when no menu selected\n\t\t\treturn\n\t\tuserActionStr = userAction.text()\n\t\tprint(' showRightClickMenu() userActionStr:', userActionStr)\n\t\tsignalName = 'bSignal ' + userActionStr\n\t\tuserSelectedMenu = True\n\n\t\tdoStackRefresh = False\n\n\t\t# image\n\t\tmaxNumChannels = self.mySimpleStack.maxNumChannels\n\t\tif userActionStr == 'Channel 1':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 1\n\t\t\t#doStackRefresh = True\n\t\t\tself.optionsChange('Panels', 'displayThisStack', value=1, doEmit=True)\n\t\t\t#self.getStackView().displayStateChange('displayThisStack', value=1)\n\t\telif userActionStr == 'Channel 2':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 2\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=2)\n\t\telif userActionStr == 'Channel 3':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 3\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=3)\n\n\t\telif userActionStr == 'Channel 1 Mask':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 4\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=4)\n\t\telif userActionStr == 'Channel 2 
Mask':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 4+1\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=4+1)\n\t\telif userActionStr == 'Channel 3 Mask':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 4+2\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=4+2)\n\n\t\telif userActionStr == 'Channel 1 Skel':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 7\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=7)\n\t\telif userActionStr == 'Channel 2 Skel':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 7+1\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=7+1)\n\t\telif userActionStr == 'Channel 3 Skel':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 7+2\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=7+2)\n\n\t\t# EDT\n\t\telif userActionStr == 'Channel 1 EDT':\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=10)\n\t\telif userActionStr == 'Channel 2 EDT':\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=10+1)\n\t\telif userActionStr == 'Channel 3 EDT':\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=10+2)\n\n\n\t\telif userActionStr == 'RGB':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 'rgb'\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value='rgb')\n\n\t\t#\n\t\t# view of tracing\n\t\telif userActionStr == 'Image':\n\t\t\tself.getStackView().displayStateChange('showImage', toggle=True)\n\t\t\tdoStackRefresh = True\n\t\t\t#self.displayStateDict['showImage'] = not self.displayStateDict['showImage']\n\t\telif userActionStr == 'Sliding Z':\n\t\t\t#self.getStackView().displayStateDict['displaySlidingZ'] = not self.getStackView().displayStateDict['displaySlidingZ']\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displaySlidingZ', toggle=True)\n\t\telif userActionStr == 'Nodes':\n\t\t\t#optionsChange('Panels', 'showLeftToolbar', toggle=True, doEmit=True)\n\t\t\tself.getStackView().displayStateDict['showNodes'] = not self.getStackView().displayStateDict['showNodes']\n\t\t\tdoStackRefresh = True\n\t\telif userActionStr == 'Edges':\n\t\t\tself.getStackView().displayStateDict['showEdges'] = not self.getStackView().displayStateDict['showEdges']\n\t\t\tdoStackRefresh = True\n\n\t\t#\n\t\t# toolbars\n\t\telif userActionStr == 'Left Toolbar':\n\t\t\tself.optionsChange('Panels', 'showLeftToolbar', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showLeftToolbar'] = not self.options['Panels']['showLeftToolbar']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Contrast Panel':\n\t\t\tself.optionsChange('Panels', 'showContrast', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showContrast'] = not self.options['Panels']['showContrast']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Node List':\n\t\t\tself.optionsChange('Panels', 'showNodeList', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showNodeList'] = not self.options['Panels']['showNodeList']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Edge List':\n\t\t\tself.optionsChange('Panels', 'showEdgeList', 
toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showEdgeList'] = not self.options['Panels']['showEdgeList']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Search List':\n\t\t\tself.optionsChange('Panels', 'showSearch', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showSearch'] = not self.options['Panels']['showSearch']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Annotation List':\n\t\t\tself.optionsChange('Panels', 'showAnnotations', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showSearch'] = not self.options['Panels']['showSearch']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Status Panel':\n\t\t\tself.optionsChange('Panels', 'showStatus', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showStatus'] = not self.options['Panels']['showStatus']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Line Profile Panel':\n\t\t\tself.optionsChange('Panels', 'showLineProfile', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showLineProfile'] = not self.options['Panels']['showLineProfile']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Caiman':\n\t\t\tself.optionsChange('Panels', 'showCaiman', toggle=True, doEmit=True)\n\n\t\t# other\n\t\telif userActionStr == 'Options':\n\t\t\toptionsDialog = bimpy.interface.bOptionsDialog(self, self)\n\t\telif userActionStr == 'Napari':\n\t\t\tself.openNapari()\n\t\telif userActionStr == 'Square':\n\t\t\tself.myStackView2.toggleMakeSquare()\n\t\t\t#self.resizeEvent(QtGui.QResizeEvent(self.size(), QtCore.QSize()))\n\t\t\t#self.repaint()\n\t\telif userActionStr == 'Save Image':\n\t\t\tself.saveImage()\n\t\telif userActionStr == 'Save Movie':\n\t\t\tself.saveMovie()\n\t\telif userActionStr == 'Refresh':\n\t\t\tself.getStackView()._preComputeAllMasks()\n\n\t\telse:\n\t\t\tprint(' showRightClickMenu() -->> no action taken for userActionStr:', userActionStr)\n\t\t\tuserSelectedMenu = False\n\n\t\t# emit a signal\n\t\t# todo: this is emitting when self.getStackView().displayStateDict is not changing, e.g. 
for user action 'Contrast' and 'Annotations'\n\t\t'''\n\t\tif userSelectedMenu:\n\t\t\tself.setSlice() # update\n\t\t\tself.displayStateChangeSignal.emit(signalName, self.getStackView().displayStateDict)\n\t\t'''\n\n\t\tif doStackRefresh:\n\t\t\tself.getStackView().setSlice()\n\n\t\t#return False\n\t\t#print('right click menu return')\n\t\treturn", "def display_menu(grid_display, button_list, text_color, screen_width, screen_height, cycle_img, cycle_rect):\n\n draw_menu_text(grid_display, text_color, screen_width, screen_height)\n\n animate_menu(grid_display, cycle_img, cycle_rect, screen_width*.2,\n screen_width*.8, 2, Color(\"Blue\"))\n\n for button in button_list:\n if button.info != \"Back\" and button.info != \"Continue\" and button.info != \"Reset\" and \\\n button.info != \"Menu\":\n button.draw_button(grid_display)\n button.set_click()", "def draw_level(self):\r\n self.level_surface.blit(self.map_image, self.viewport, self.viewport)\r\n self.level_surface.blit(self.title_box, self.title_rect)", "def draw(self, screen):", "def draw(self, window):\n if self.selected:\n self.menu.draw(window) #Drawing menu\n window.blit(kill_count_table, (self.x + self.width // 2 - 15, self.y - self.height // 2 + 35))\n kills = self.font.render(str(self.kill_count) + \" Kills\", 1, (255, 255, 255))\n window.blit(kills, (self.x + self.width // 2 + 5, self.y - self.height // 2 + 43))\n\n tower_image = self.tower_images[self.level-1]\n\n if not self.level_up_animation: #Always draw the tower except when leveling up\n window.blit(tower_image, (self.x - tower_image.get_width() // 2, self.y - tower_image.get_height() // 2))\n\n else: #Leveling up animation procedure\n window.blit(self.level_up[self.level_animation // 2], (self.x - tower_image.get_width() - 75, self.y - 225))\n self.level_animation += 1\n if self.level_animation == len(level_up) * 2:\n self.level_up_animation = False\n self.level_animation = 0", "def display(self):\n stroke(51)\n fill(self.couleur)\n rect(self.pos_x, 0, self.largeur, self.min_y)\n rect(self.pos_x, self.min_y + self.hauteur, self.largeur, util.SCREEN_Y-(self.min_y + self.hauteur))", "def draw(self, screen):\r\n if self.selected:\r\n used_color = (255 - self.color[0], 255 - self.color[1], 255 - self.color[2])\r\n else:\r\n used_color = self.color\r\n pygame.draw.rect(screen, used_color,\r\n (self.location_top_left[0], self.location_top_left[1], self.size_x, self.size_y), 0)", "def show_menu(stdscr, choice=0):\n stdscr.clear()\n curses.curs_set(False)\n stdscr.addstr(\"*** --- Interface de chiffrement --- ***\\n\\n\")\n if choice == 1:\n stdscr.addstr(\"->1<- Chiffrement symétrique avec Threefish\\n\", curses.color_pair(1))\n else:\n stdscr.addstr(\"->1<- Chiffrement symétrique avec Threefish\\n\")\n if choice == 2:\n stdscr.addstr(\"->2<- Chiffrement de Cramer-Shoup\\n\", curses.color_pair(1))\n else:\n stdscr.addstr(\"->2<- Chiffrement de Cramer-Shoup\\n\")\n if choice == 3:\n stdscr.addstr(\"->3<- Hashage d'un fichier\\n\", curses.color_pair(1))\n else:\n stdscr.addstr(\"->3<- Hashage d'un fichier\\n\")\n if choice == 4:\n stdscr.addstr(\"->4<- Déchiffrement symétrique avec Threefish\\n\", curses.color_pair(1))\n else:\n stdscr.addstr(\"->4<- Déchiffrement symétrique avec Threefish\\n\")\n if choice == 5:\n stdscr.addstr(\"->5<- Déchiffrement de Cramer-Shoup\\n\", curses.color_pair(1))\n else:\n stdscr.addstr(\"->5<- Déchiffrement de Cramer-Shoup\\n\")\n if choice == 6:\n stdscr.addstr(\"->6<- Vérification du hash\\n\", curses.color_pair(1))\n else:\n stdscr.addstr(\"->6<- 
Vérification du hash\\n\")\n if choice == 7:\n stdscr.addstr(\"->q<- Pour quitter\\n\", curses.color_pair(1))\n else:\n stdscr.addstr(\"->q<- Pour quitter\\n\")\n stdscr.refresh()", "def draw(self, screen):\n if self.state == self.S_ACTIVE:\n screen.blit(self.image, self.rect)", "def draw(self, x, y):\r\n for w in self.widgets:\r\n if w.visible:\r\n w.draw()\r\n self.pointer.position(x + self.p_dx, y + self.p_dy, 0.5)\r\n self.pointer.draw()", "def draw(self):", "def main_menu_buttons(self):\n button_start_position = self.draw_button(MAIN_MENU_BUTTON['button_start']['image'],\n MAIN_MENU_BUTTON['button_start']['y'])\n button_exit_position = self.draw_button(MAIN_MENU_BUTTON['button_exit']['image'],\n MAIN_MENU_BUTTON['button_exit']['y'])\n\n if button_start_position[0] + button_start_position[2] > self.mouse[0] > button_start_position[0] and \\\n button_start_position[1] + button_start_position[3] > self.mouse[1] > button_start_position[1]:\n\n pygame.mouse.set_cursor(*pygame.cursors.diamond)\n self.draw_button(BUTTON_NAME_FOR_MAIN_MENU[4], MAIN_MENU_BUTTON['button_start']['y'])\n self.draw_button(BUTTON_NAME_FOR_MAIN_MENU[1], MAIN_MENU_BUTTON['button_exit']['y'])\n\n for self.click in pygame.event.get():\n if self.click.type == pygame.MOUSEBUTTONDOWN and self.click.button == 1:\n self.surface.fill((30, 30, 30))\n pygame.mouse.set_cursor(*pygame.cursors.tri_left)\n self.switch_scene = True\n\n elif button_exit_position[0] + button_exit_position[2] > self.mouse[0] > button_exit_position[0] and\\\n button_exit_position[1] + button_exit_position[3] > self.mouse[1] > button_exit_position[1]:\n self.draw_button(BUTTON_NAME_FOR_MAIN_MENU[0], MAIN_MENU_BUTTON['button_start']['y'])\n pygame.mouse.set_cursor(*pygame.cursors.diamond)\n self.draw_button(BUTTON_NAME_FOR_MAIN_MENU[5], MAIN_MENU_BUTTON['button_exit']['y'])\n\n for self.click in pygame.event.get():\n if self.click.type == pygame.MOUSEBUTTONDOWN and self.click.button == 1:\n self.isrunning = False\n\n else:\n # set standard cursor\n pygame.mouse.set_cursor(*pygame.cursors.tri_left)", "def drawScreen(self, player, AI, maze):\n self.showScreen(maze, self.bananaIcon)\n player.drawCreature(self.screen, self.TILE_SIZE)\n AI.drawCreature(self.screen, self.TILE_SIZE)\n if self.popup:\n self.ItemPickedUpPopUp()\n pygame.display.update()", "def update_menu(self):\n x_pos = (self.menu_x - self.block_x) // 2 + self.offset_x\n y_pos = self.offset_y + 50 # account for bottom text\n self.menu_sprites[0].image = self.sprite_types[self.curr_sprite]\n for sprite in self.menu_sprites:\n sprite.x = x_pos\n sprite.y = y_pos + sprite.image.height\n sprite.update()\n y_pos += self.offset_y + sprite.image.height", "def show(self,canvas): \n for piece in self.bluh:\n piece.render(canvas)\n\n #create vertical and horizontal bold outline\n for i in range(len(self.board)+1):\n x0=300+self.piecesize*i\n y0=100\n x1=300+self.piecesize*i\n y1=900\n canvas.create_line(x0,y0,x1,y1,width=5,fill=self.mode.color1)\n for a in range(len(self.board)+1):\n for i in range(len(self.board)+1):\n x2=300\n y2=100+self.piecesize*i\n x3=1100\n y3=100+self.piecesize*i\n canvas.create_line(x2,y2,x3,y3,width=5,fill=self.mode.color1)\n for piece in self.bluh:\n if piece.isselected==True:\n piece.dropShadow(canvas)\n piece.render(canvas)\n #print(piece.__repr__())", "def draw():\n screen.fill((0, 0, 0))\n alien.draw()", "def draw(self, draw_surface):\n # If their is an active select count sub event pass control to it.\n if self.select_count_event is not None:\n 
self.select_count_event.draw(draw_surface)\n else:\n # Draw the background menu.\n super().draw(draw_surface)\n\n # Draw the items and prices.\n draw_surface.blit(self._item_surface, (0, 0))\n\n # Draw the selected item description and image\n draw_surface.blit(self.item_desc_surf, (40, 115))\n draw_surface.blit(self.item_pic_surf, (8, 124))\n\n # Draw the cursors.\n self.draw_cursor.draw(draw_surface)\n self.down_bobbing_cursor.draw(draw_surface)\n self.up_bobbing_cursor.draw(draw_surface)\n\n # Also draw subevents if they exist.\n if self.confirm_buy_response is not None:\n self.confirm_buy_response.draw(draw_surface)\n elif self.thanks_dialogue is not None:\n self.thanks_dialogue.draw(draw_surface)", "def isVisible(self):\n\t\treturn True", "def draw_frame(self):\n if self.redraw:\n self.window.view.fill((0, 0, 0))\n self.menuview.fill(config.menubackcolor)\n\n columns = config.columns\n colwidth = self.textarea.get_width() / columns\n srow = self.selected % self.rows\n scol = self.selected / self.rows\n\n # adjust offset to within (columns) of col\n self.col_offset = min(scol, max(self.col_offset, scol - columns + 1))\n\n # render and blit each column of options that is showing\n # TODO: render all columns at init, and rerender only if font size or row count changes\n for c, col in enumerate(range(self.col_offset, columns)):\n opts = self.options[self.rows * col:self.rows * (col + 1)]\n opttext = self.font.render('\\n'.join(opt[0] for opt in opts),\n charheight=self.cheight, lineheight=self.rheight,\n tracking=1, color=config.menufontcolor)\n self.textarea.blit(opttext, (c * colwidth + self.cheight, 0))\n\n # blit marker\n mmargin = self.cheight / 4\n self.textarea.blit(self.marker, ((scol - self.col_offset) * colwidth + mmargin,\n srow * self.rheight + mmargin))\n\n self.redraw = False", "def create_menu():\n MenuData = [\n (\"&Draw Variables\",drawable.ask),\n (\"&Show Variables\",printall),\n (\"&Print Variables\",printval),\n (\"&Edit Variable\",edit),\n (\"&Rename Variable\",rename),\n (\"&Forget Variables\",forget),\n (\"---\",None),\n (\"&Create Plane\",\n [(\"Coordinates\", \n [(\"Point and normal\", createPlaneCoordsPointNormal),\n (\"Three points\", createPlaneCoords3Points),\n ]), \n (\"Visually\", \n [(\"Three points\", createPlaneVisual3Points),\n ]),\n ]),\n (\"&Select Plane\",planes.ask),\n (\"&Draw Selection\",planes.draw),\n (\"&Forget Selection\",planes.forget),\n (\"---\",None),\n (\"&Pick Actors\",pick_actors),\n (\"&Pick Elements\",pick_elements),\n (\"&Pick Points\",pick_points),\n (\"&Pick Edges\",pick_edges),\n (\"---\",None),\n ('&Selection',\n [('&Create Report',report_selection),\n ('&Set Property',setprop_selection),\n ('&Grow',grow_selection),\n ('&Partition',partition_selection),\n ('&Get Partition',get_partition),\n ('&Export',export_selection),\n ]),\n (\"---\",None),\n ('&Query',\n [('&Actors',query_actors),\n ('&Elements',query_elements),\n ('&Points',query_points),\n ('&Edges',query_edges),\n ('&Distances',query_distances),\n ]),\n (\"---\",None),\n (\"&Close\",close_menu),\n ]\n return widgets.Menu('Tools',items=MenuData,parent=GD.gui.menu,before='help')", "def show(self, display):\n if self.visible == True:\n pg.draw.rect(display, self.bgColor, self.panel)\n\n for element in self.elements:\n element.show(display)", "def keyboard_menu_control(self, app):\n mx, my = pg.mouse.get_pos()\n click = False\n\n menu_view = self.get_view.menu_view\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit()\n sys.exit()\n\n if event.type == 
pg.MOUSEBUTTONDOWN:\n if event.button == 1:\n click = True\n\n if menu_view.menu_button.collidepoint((mx, my)):\n if click:\n app.menu_view_running = False\n\n if menu_view.quit_button.collidepoint((mx, my)):\n if click:\n pg.quit()\n sys.exit(0)", "def create_menus( self ):", "def draw(self):\n\n self.squares.draw(self.screen)\n if not self.hide_grid:\n self.draw_grid()\n self.fleas.draw(self.screen)\n pygame.display.flip()", "def ToggleDrawingTools(self, event):\n pass", "def draw(self):\n NavigationToolbar2.draw(self)\n\n self._myParent.evt_view_updated()\n\n return", "def draw(self, draw_surface):\n Dialogue.draw(self, draw_surface)\n if self.response_menu is not None:\n draw_surface.blit(self.response_menu, (1, 1))\n self.cursor.draw(draw_surface)", "def ensure_visible(self):\n self.set_visible(True)", "def __editorShowMenu(self, menuName, menu, editor):\n if menuName == \"Tools\":\n if self.__menu.menuAction() not in menu.actions():\n # Re-add our menu\n self.__editors[editor] = []\n if not menu.isEmpty():\n act = menu.addSeparator()\n self.__editors[editor].append(act)\n act = menu.addMenu(self.__menu)\n self.__editors[editor].append(act)", "def draw(self):\n\n for item in self.vis:\n item.undraw()\n self.render()\n for item in self.vis:\n item.draw(self.win)\n self.drawn = True", "def show( self ):\n if self.visible == 1:#ohnheiser hack and time() - self.lastMotion > self.delay:\n self.visible = 2\n if self.visible == 2:\n self.deiconify()", "def showWindowMenu(self, windowMenu):\n raise RuntimeError('Not implemented')" ]
[ "0.7788359", "0.75005126", "0.74821126", "0.7407488", "0.7399661", "0.7335653", "0.7126484", "0.70524067", "0.7018847", "0.6975471", "0.6741928", "0.6728081", "0.6551245", "0.6539362", "0.6494997", "0.6488301", "0.648804", "0.64400756", "0.6423215", "0.6423037", "0.641454", "0.6393461", "0.6383666", "0.63691574", "0.63682425", "0.63653517", "0.63555336", "0.63466656", "0.63348377", "0.6325595", "0.62295806", "0.62264633", "0.62149584", "0.6211438", "0.617851", "0.616426", "0.6120658", "0.61202693", "0.6108227", "0.60966295", "0.6088454", "0.60749197", "0.60328245", "0.60317796", "0.60316336", "0.6031528", "0.6016452", "0.6010056", "0.59869766", "0.5984119", "0.59838283", "0.5980749", "0.597639", "0.5959601", "0.59569293", "0.5955334", "0.59492296", "0.5933388", "0.59319", "0.5931389", "0.5926294", "0.59181017", "0.59181017", "0.5914927", "0.590969", "0.59078693", "0.5904778", "0.59024256", "0.5896864", "0.5894379", "0.58941025", "0.5890756", "0.5888413", "0.5886575", "0.588467", "0.58738047", "0.58707", "0.5859644", "0.5857935", "0.58578783", "0.5857811", "0.5856381", "0.58535546", "0.5847419", "0.58464694", "0.5845602", "0.58409774", "0.5836207", "0.58263046", "0.5815959", "0.5815234", "0.5806221", "0.57914174", "0.578962", "0.5786393", "0.5784167", "0.57817656", "0.5779519", "0.5778607", "0.57762223" ]
0.68641776
10
Get the pixel positions for positioning a menu in the center of the screen
def get_menu_coords(self, menu):
    menu_center_x = (self.width // 2)
    menu_center_y = (self.height // 2)
    # get a mapping of the menu co-ordinates for relative positioning of things inside the menu
    menu_cords = (
        (menu_center_x - (menu.width // 2), menu_center_y + (menu.height // 2)),
        (menu_center_x + (menu.width // 2), menu_center_y + (menu.height // 2)),
        (menu_center_x - (menu.width // 2), menu_center_y - (menu.height // 2)),
        (menu_center_x + (menu.width // 2), menu_center_y - (menu.height // 2)),
    )
    return menu_center_x, menu_center_y, menu_cords
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getMenuItemPixels(cls):\n return cls.menuItemPixels", "def getCenter(self):\n return [self.tx/self.tw, self.ty/self.tw]", "def screen_coordinates(pos):\n\n return [int((pos[0] % screen_width) / px), screen_height - int((pos[1] % screen_height) / px)]", "def get_pix_pos(self):\r\n return vec((self.grid_pos[0]*self.app.cell_width)+TOP_BOTTOM_BUFFER//2+self.app.cell_width//2,\r\n (self.grid_pos[1]*self.app.cell_height) +\r\n TOP_BOTTOM_BUFFER//2+self.app.cell_height//2)\r\n # where Pac-Man starts relative to the board\r", "def mouse_position(pos):\n x, y = pos\n m = x // SQUARE_SIZE\n n = y // SQUARE_SIZE\n return n, m", "def get_center(self):\n x = round(self.x_pos)\n y = round(self.y_pos)\n return [int(x),int(y)]", "def get_pixel_pos(self):\n\n c = self.get_center()\n\n return Tank.three_by_three(c[0],c[1])", "def _pos(self):\n sw = self.parent.winfo_screenwidth()\n sh = self.parent.winfo_screenheight()\n w = sw * 0.8\n h = sh * 0.8\n x = (sw - w) / 2\n y = (sh - h) / 2\n self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def pos(self):\n x = (self.ec._win._mouse_x -\n self.ec._win.width / 2.) / (self.ec._win.width / 2.)\n y = (self.ec._win._mouse_y -\n self.ec._win.height / 2.) / (self.ec._win.height / 2.)\n return np.array([x, y])", "def cursorPosGL(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n y = self.size().height() - pos.y()\n return pos.x(), y", "def cursorPosGL(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n y = self.size().height() - pos.y()\n return pos.x(), y", "def center(self):\n return (self.centerx, self.centery)", "def get_pos(self) -> tuple:\n return self.rect.center", "def GetCenter(self):\n ...", "def GetCenter(self):\n ...", "def GetCenter(self):\n ...", "def GetCenter(self):\n ...", "def calculate_screen_position(self):\r\n\r\n character_select_start_y = 604\r\n character_select_end_y = 646\r\n\r\n if self.slotNumber <= 6:\r\n start_y = 585 # 595\r\n end_y = 627 # 637\r\n x_hero_number = self.slotNumber\r\n else:\r\n start_y = 300 # 290\r\n end_y = 342 # 332\r\n x_hero_number = self.slotNumber - 6\r\n\r\n start_x = 249 + (x_hero_number * 192)\r\n end_x = 326 + (x_hero_number * 192)\r\n\r\n self.screenPositionCharacterSelect = {\r\n \"start_x\": start_x,\r\n \"end_x\": end_x,\r\n \"start_y\": character_select_start_y,\r\n \"end_y\": character_select_end_y\r\n }\r\n self.screenPositionTab = {\r\n \"start_x\": start_x,\r\n \"end_x\": end_x,\r\n \"start_y\": start_y,\r\n \"end_y\": end_y\r\n }", "def center(self):\n xc = (self.x.max() + self.x.min())/2.\n yc = (self.y.max() + self.y.min())/2.\n return (xc, yc)", "def get_pos_in_pixels(self):\n pixelpos = Vector(self.pos.x * 32, -self.pos.y * 32)\n return pixelpos + self.offset", "def center(self):\r\n self.centerx = self.screen_rect.centerx \r\n self.centery = self.screen_rect.centery", "def get_pos(self):\n return self.rect.midtop", "def midtop(self):\n return (self.centerx, self.top)", "def get_center_scr(self):\r\n return self.rect.center", "def from_screen_coordinates(pos):\n\n return [float(pos[0]) * px, float(screen_height - pos[1]) * px]", "def get_center_coordinates(game):\n \n return math.ceil(game.height / 2), math.ceil(game.width / 2)", "def calculate_window_position(self):\n self.x = SQUARE_SIZE * self.col + SQUARE_SIZE // 2\n self.y = SQUARE_SIZE * self.row + SQUARE_SIZE // 2", "def center(self):\n # minz to offset the heights to 0\n mz = (self.maxz-self.minz)/2\n #mz = self.minz\n return (self.minx + self.width / 2, self.miny + 
self.height / 2, mz)", "def get_aa_pos_on_screen(self,position,frame):\n position=position*3+float(frame)-1\n x,y=self.get_base_pos_on_screen(position)\n y=y+20.0+float(frame)*15.0\n return x,y", "def mousePos():\n data = display.Display().screen().root.query_pointer()._data\n return data[\"root_x\"], data[\"root_y\"]", "def center_mario(self):\n self.rect.midbottom = self.screen_rect.midbottom\n self.x, self.y = float(self.rect.x), float(self.rect.y)", "def dimscr(self):\n return (self.startx, self.starty, self.endx - self.startx, self.endy - self.starty)", "def center(self):\n return self.center_x, self.center_y", "def visible_area(self):\n # looks like zeach has a nice big screen\n half_viewport = Vec(1920, 1080) / 2 / self.scale\n top_left = self.world.center - half_viewport\n bottom_right = self.world.center + half_viewport\n return top_left, bottom_right", "def _get_frame_center(self, src):\n w, h = get_size(src)\n x = w / 2\n y = h / 2\n\n return x, y", "def get_position(self):\n position = (self.position_x * SPRITE_SIZE, self.position_y * SPRITE_SIZE)\n return position", "def center(self):\n return [self.position[i]+self.radius for i in range(2)]", "def centers(self):\n return self.xc, self.yc", "def center(self):\n return self.pos + self.axis / 2.0", "def center(self):\n bounds = self.bounds\n x = (bounds[1] + bounds[0]) / 2\n y = (bounds[3] + bounds[2]) / 2\n z = (bounds[5] + bounds[4]) / 2\n return [x, y, z]", "def center_ava(self):\n\t\tself.rect.midbottom = self.screen_rect.midbottom\n\t\tself.x = float(self.rect.x)", "def center(self):\n return np.array([0,0,1/self.C+self.pos()])", "def get_grid_position(self):\n tile_size_x = constants.WINDOW_WIDTH / constants.GRID_TILE_LENGTH\n tile_size_y = constants.WINDOW_HEIGHT / constants.GRID_TILE_LENGTH\n grid_x = tile_size_x / self.host.x\n grid_y = tile_size_y / self.host.y\n return grid_x, grid_y", "def get_pos(self):\n return [self.row, self.col]", "def getPos(self):\n return self.Xpos,self.Ypos", "def calculate_center(self):\n return [(self.startX + self.endX) / 2., (self.startY + self.endY) / 2.]", "def getPosition(self):\n\tleft = self.getLeft()\n\ttop = self.getTop()\n\treturn (left,top)", "def _posToScreenCoords(self, pos):\n camLim_x = self.camCenter.x - self.camSize / 2\n camLim_y = self.camCenter.y - self.camSize / 2\n\n x = (self.screenSize[0] / self.camSize) * (pos.x - camLim_x)\n y = (self.screenSize[1] / self.camSize) * (pos.y - camLim_y)\n\n # Invert orientation of y\n y = self.screenSize[1] - y\n\n return int(x), int(y)", "def get_position_coords(cls):\n row = math.floor(cls.position / cls.size)\n col = cls.position - row * cls.size\n return row, col", "def show_mouse_position_with_px(self):\n self.main_menu_greets_fonts = pygame.font.Font(os.path.join(PATH_TO_RESOURCE, 'font_forever.ttf'), 10)\n self.positiontext(f'Mouse position {pygame.mouse.get_pos()}', (770, 20))\n self.mouse = pygame.mouse.get_pos()\n return self.mouse", "def _getTopCenterIndices(self, resolution, rectangular):\n # get x, y indices to get away from the ring basis.\n # indices starts with (0, 0) in the middle, with (r2, p1) -> (1, 0), etc. 
(x is on the pos 1 ray)\n\n numAxialLevels = 2 * resolution\n xi, yi = self.indices()\n if rectangular:\n topCenterI = 2 + (3 * resolution) * xi\n else:\n # 4*d b/c each increase in xi moves you back by numstacks/2\n topCenterI = 1 + (4 * resolution) * xi + (yi * numAxialLevels)\n topCenterJ = 1 + xi * numAxialLevels // 2 + numAxialLevels * yi\n return topCenterI, topCenterJ", "def get_center(self) -> Tuple[int, int]:\n raise NotImplementedError()", "def getSearchSpaceCoords(self):", "def find_center(self):\n x = np.int(np.rint((len(self.grid[0][0]))/2))\n center = np.array([x, x, x])\n self.grid[center[0]][center[1]][center[2]] = 1\n return self.grid, center", "def _centered_coords(self):\n\n array = self.detection_base.height_model.array\n height, width = self.detection_base.height_model.cell_size_x * array.shape[0], \\\n self.detection_base.height_model.cell_size_y * array.shape[1]\n min_x, min_y = self.detection_base.height_model._bounding_box[0], \\\n self.detection_base.height_model._bounding_box[2]\n\n coords = self.detection_base._coords_array_single\n coords[:, 0], coords[:,1] = coords[:, 0] - (min_x + (width / 2)), coords[:,1] - (min_y + (height / 2))\n return coords", "def convert_mousepos(self, pos):\n tokenx, tokeny = pos\n row = int((tokenx - self.x_margin) / SPACESIZE)\n column = int((tokeny - self.y_margin) / SPACESIZE)\n return column, row", "def get_pos(self, cx, cy):\n x = self.min_x + cx*(self.size+0.5)\n y = self.min_y + cy*(self.size+0.5)\n return (x,y)", "def adjust_position(self):\n\n # Adjust position for x-axis\n r = self.rect.x % 30\n if r != 0:\n if r <= 16:\n x = self.rect.x - r\n else:\n x = self.rect.x + (30 - r)\n\n else:\n x = self.rect.x\n\n # Adjust position for y-axis\n r = self.rect.y % 30\n if r != 0:\n if r <= 16:\n y = self.rect.y - r\n else:\n y = self.rect.y + (30 - r)\n else:\n y = self.rect.y\n\n return x, y", "def getLEDPos(self, nx, ny, centre, wLen):\n ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.Params['distance']) # Angle to x axis\n ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.Params['distance']) # Angle to y axis\n dx = ax / (wLen * self.Params['fResolution'][0])\n dy = ay / (wLen * self.Params['fResolution'][1])\n pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.subParams['subSize'][0] / 2)]\n return pos", "def eval_screen_size():\n center_x = 32 // 2 * app_manager.get_map_width()\n center_y = 32 // 2 * app_manager.get_map_height()\n\n loc1_le = EPD(0x58DC60)\n loc1_te = EPD(0x58DC60 + 4)\n loc1_re = EPD(0x58DC60 + 8)\n loc1_be = EPD(0x58DC60 + 12)\n\n # screen position and location\n loc1_lv = f_dwread_epd(loc1_le)\n loc1_tv = f_dwread_epd(loc1_te)\n loc1_rv = f_dwread_epd(loc1_re)\n loc1_bv = f_dwread_epd(loc1_be)\n prev_sx = f_dwread_epd(EPD(0x0062848C))\n prev_sy = f_dwread_epd(EPD(0x006284A8))\n\n # centerview and update x, y\n SeqCompute([\n (loc1_le, SetTo, center_x),\n (loc1_te, SetTo, center_y),\n (loc1_re, SetTo, center_x),\n (loc1_be, SetTo, center_y)])\n f_dwwrite_epd(loc1_le, center_x)\n f_dwwrite_epd(loc1_te, center_y)\n f_dwwrite_epd(loc1_re, center_x)\n f_dwwrite_epd(loc1_be, center_y)\n DoActions(CenterView(1))\n cur_sx = f_dwread_epd(EPD(0x0062848C))\n cur_sy = f_dwread_epd(EPD(0x006284A8))\n\n # get size\n dx = center_x - cur_sx\n dy = center_y - cur_sy\n\n # restore screen\n screen_x = prev_sx + dx\n screen_y = prev_sy + dy\n SeqCompute([\n (loc1_le, SetTo, screen_x),\n (loc1_te, SetTo, screen_y),\n (loc1_re, SetTo, screen_x),\n (loc1_be, SetTo, screen_y)])\n 
DoActions(CenterView(1))\n\n # restore location\n SeqCompute([\n (loc1_le, SetTo, loc1_lv),\n (loc1_te, SetTo, loc1_tv),\n (loc1_re, SetTo, loc1_rv),\n (loc1_be, SetTo, loc1_bv)])\n\n EUDReturn([dx*2, dy*2])", "def center(self) -> Tuple[float, float]:\n return self.x + self.width / 2, self.y + self.height / 2", "def to_map_pos(self, screen_pos):\n return screen_pos + self.player_pos - SCREEN.size // 2", "def xy_to_element(self, pos: tuple) -> tuple:\n element_pos_x = pos[0] - c.SCREEN_PADDING\n if pos[1] < c.CANVAS_START_Y:\n screen_element = self.menu\n element_pos_y = pos[1] - c.SCREEN_PADDING\n else:\n screen_element = self.grid\n element_pos_y = pos[1] - (c.CANVAS_START_Y)\n return (screen_element, (element_pos_x, element_pos_y))", "def center(self) -> Tuple[int, int]:\n center_x = int((self.x1 + self.x2) // 2)\n center_y = int((self.y1 + self.y2) // 2)\n return (center_x, center_y)", "def get_pos(x, y):\r\n return normalize(x) // 2, normalize(y) // 4", "def centerx(self):\n return self.left + self.width / 2", "def center(self):\n\n return (\n self.x() + (self.width() / 2),\n self.y() + (self.height() / 2)\n )", "def initialCoordinates():\r\n return (-250,-250)", "def pixel_centre(self):\n return tuple(self._pixel_centre)", "def get_machinekit_position():\n return settings.controller.axes_position()", "def center(self):\n return (self.upper_right + self.lower_left) * 0.5", "def get_position(self, position, size=(0, 0)):\n size = Position(size[0] / 2, size[1] / 2)\n relative_to_focus = position - self.focus # space coordinates\n position_on_canvas = (relative_to_focus * self.scale -\n self.offset.invert_y() - size.invert_y())\n return position_on_canvas.invert_y().x_y\n # return ((self.offset + position - self.focus) * self.scale).x_y", "def centered_tile(self):\n\n coord_x = int(self.hbar.value() + self.hbar.pageStep()/2)\n coord_y = int(self.vbar.value() + self.vbar.pageStep()/2)\n return self.scene_to_ingame(coord_x, coord_y)", "def mouse_position(self):\r\n # TODO: add: Now deprecated in favor of pi3d.events\r\n if self.mouse:\r\n return self.mouse.position()\r\n elif self.tkwin:\r\n return self.tkwin.winfo_pointerxy()\r\n else:\r\n return -1, -1", "def offset(self) -> Tuple[int, int]:\n return (self.ioffset[0].to_pixels(self.parent.width),\n self.ioffset[1].to_pixels(self.parent.height))", "def vert_center(self):\n return self._vert_center", "def vert_center(self):\n return self._vert_center", "def getPosicion(self):\r\n\t\treturn [self._x, self._y]", "def get_offset(tk_window):\n\n width_offset = int(\n (tk_window.winfo_screenwidth() / 2) - (tk_window.winfo_width() / 2)\n )\n\n height_offset = int(\n (tk_window.winfo_screenheight() / 2) - (tk_window.winfo_height() / 2)\n )\n\n return (width_offset, height_offset)", "def _center(pos, shift):\n x = np.concatenate((pos[0], pos[0] + shift[0]))\n y = np.concatenate((pos[1], pos[1] + shift[1]))\n return (x.max() + x.min()) / 2, (y.max() + y.min()) / 2", "def getcellcenter(self,cellx,celly):\n xpos = self.xmargin + cellx*CELLSIZE + CELLSIZE/2\n ypos = self.ymargin + celly*CELLSIZE + CELLSIZE/2\n return (xpos,ypos)", "def int_33H_4(self):\r\n horizontal_position = self.registers['CX'].get_int(-1)\r\n vertical_position = self.registers['DX'].get_int(-1)\r\n print(horizontal_position, vertical_position)\r\n MainWindow.set_cursor_poisition(horizontal_position, vertical_position)", "def horiz_center(self):\n return self._horiz_center", "def horiz_center(self):\n return self._horiz_center", "def center(self):\n self.root.update_idletasks()\n w 
= self.root.winfo_screenwidth()\n h = self.root.winfo_screenheight()\n size = tuple(int(_) for _ in self.root.geometry().split('+')[0].split('x'))\n x = w/2 - size[0]/2\n y = h/2 - size[1]/2\n self.root.geometry(\"240x80+%d+%d\" % (x, y))", "def position(self):\n return self.x, self.y", "def get_cursor_pos(self):\n return (self.text_maker.pos[0] + 9, self.text_maker.pos[1] + 120 + 8)", "def getCellpos(self, event):\n e = event.widget\n cx, cy = cart(e.canvasx(event.x), e.canvasy(event.y))\n cellx = int(cx) // self.cell_width\n celly = int(cy) // self.cell_height\n return cellx, celly", "def position(self, x, y):\n if self.portrait:\n # HMSB\n index = (x + y * self.size[0]) >> 3\n offset = 7 - (x & 0x07)\n else:\n # VMSB\n index = (y >> 3) * self.size[0] + x\n offset = 7 - (y & 0x07)\n return index, offset", "def _get_viewport_offset(self):\n viewport_geometry = ManagedWindow.get_viewport_geometry()\n if not viewport_geometry:\n viewport_geometry = WindowGeometry()\n return {'x': viewport_geometry.x, 'y': viewport_geometry.y}", "def get_tile_location(self):\n if self.rect.x == 0:\n tile_x = 0\n elif self.rect.x % 32 == 0:\n tile_x = (self.rect.x / 32)\n else:\n tile_x = 0\n\n if self.rect.y == 0:\n tile_y = 0\n elif self.rect.y % 32 == 0:\n tile_y = (self.rect.y / 32)\n else:\n tile_y = 0\n\n return [tile_x, tile_y]", "def getPosition(self):\n\t\txxx1 = self.stokes()\n\t\txxx2 = self.thp()\n\t\txxx3 = self.tthp()\n\t\treturn [xxx1, xxx2, xxx3]", "def mousePosition(self):", "def update(self): \n super().update()\n if self.center_x < constants.left_limit:\n self.center_x = self.screen_width + constants.offscreen_space\n if self.center_x > self.screen_width + constants.offscreen_space:\n self.center_x = constants.left_limit\n if self.center_y > self.screen_height + constants.offscreen_space:\n self.center_y = constants.bottom_limit\n if self.center_y < constants.bottom_limit:\n self.center_y = self.screen_height + constants.offscreen_space", "def calculate_positions(self):\n return {cell: (cell.column, -cell.row) for cell in self.game.get_cells()}", "def topLeftCorner(self):\n self._updateExtents()\n return (self._mMinX,self._mMinY)", "def center(self):\n return self._lower + 0.5 * (self._upper - self._lower)", "def centery(self):\n return self.top + self.height / 2", "def margin(self) -> Tuple[int, int, int, int]:\n return (self.imargin[0].to_pixels(self.parent.width),\n self.imargin[1].to_pixels(self.parent.width),\n self.imargin[2].to_pixels(self.parent.height),\n self.imargin[3].to_pixels(self.parent.height))", "def get_xy_position(row, col):\n spacing_x = 86 + 11\n spacing_y = 98 + 8\n top_y = 50\n left_x = 50\n return left_x + col * spacing_x, top_y + row * spacing_y" ]
[ "0.668973", "0.6660493", "0.65916294", "0.65874016", "0.65281975", "0.6518283", "0.64817595", "0.64489305", "0.64457977", "0.64295113", "0.64295113", "0.6412496", "0.64010537", "0.6398035", "0.6398035", "0.6398035", "0.6398035", "0.6364081", "0.63268316", "0.63001156", "0.62878335", "0.6257889", "0.62543863", "0.62534195", "0.6240144", "0.6215924", "0.618805", "0.6165505", "0.61486715", "0.6146912", "0.61403686", "0.61308616", "0.61247337", "0.61138403", "0.6107803", "0.60862416", "0.607043", "0.6048018", "0.603035", "0.6022833", "0.6002109", "0.5997267", "0.5993269", "0.5992107", "0.59832996", "0.598329", "0.5982859", "0.59762263", "0.5958555", "0.59564626", "0.5948855", "0.59466547", "0.5940092", "0.5933144", "0.5926575", "0.5901916", "0.5899396", "0.58974105", "0.5895464", "0.5891427", "0.58880603", "0.5872375", "0.58670235", "0.5865684", "0.58627003", "0.58563", "0.5847706", "0.5846303", "0.5836224", "0.5835479", "0.5835099", "0.58266324", "0.5824652", "0.5821616", "0.58180726", "0.5815165", "0.5815165", "0.5809518", "0.5808229", "0.5791067", "0.5766765", "0.5761054", "0.57604885", "0.57604885", "0.5757515", "0.57507265", "0.57471824", "0.57398295", "0.5720616", "0.5710489", "0.57072484", "0.5694084", "0.5692322", "0.5691024", "0.56908244", "0.5685761", "0.5679573", "0.5677983", "0.5676825", "0.5674646" ]
0.74038523
0
Draw a square for the button. If the button is selected, display an indicator (e.g. a yellow triangle) next to it
def draw_button(self, button, relative_x, relative_y, menu_width, menu_height, is_selected): # adapted from http://arcade.academy/examples/gui_text_button.html#gui-text-button screen_button_center_x = (SCREEN_WIDTH - button.center_x - relative_x) screen_button_center_y = menu_height + (SCREEN_HEIGHT - button.center_y - relative_y) arcade.draw_rectangle_filled( screen_button_center_x, screen_button_center_y, button.width, button.height, COLOUR_MAP[button.face_color] ) if is_selected: selected_x = screen_button_center_x - (button.width // 2) - 25 selector_height = 10 selector_width = 16 arcade.draw_triangle_filled( selected_x, screen_button_center_y - selector_height, selected_x, screen_button_center_y + selector_height, selected_x + selector_width, screen_button_center_y, COLOUR_MAP[Colour.YELLOW.value] ) if not button.pressed: color = COLOUR_MAP[button.shadow_color] else: color = COLOUR_MAP[button.highlight_color] # Bottom horizontal arcade.draw_line(screen_button_center_x - button.width / 2, screen_button_center_y - button.height / 2, screen_button_center_x + button.width / 2, screen_button_center_y - button.height / 2, color, button.button_height) # Right vertical arcade.draw_line(screen_button_center_x + button.width / 2, screen_button_center_y - button.height / 2, screen_button_center_x + button.width / 2, screen_button_center_y + button.height / 2, color, button.button_height) if not button.pressed: color = COLOUR_MAP[button.highlight_color] else: color = COLOUR_MAP[button.shadow_color] # Top horizontal arcade.draw_line(screen_button_center_x - button.width / 2, screen_button_center_y + button.height / 2, screen_button_center_x + button.width / 2, screen_button_center_y + button.height / 2, color, button.button_height) # Left vertical arcade.draw_line(screen_button_center_x - button.width / 2, screen_button_center_y - button.height / 2, screen_button_center_x - button.width / 2, screen_button_center_y + button.height / 2, color, button.button_height) x = screen_button_center_x y = screen_button_center_y if not button.pressed: x -= button.button_height y += button.button_height arcade.draw_text(button.text, x, y, arcade.color.BLACK, font_size=button.font_size, width=button.width, align="center", anchor_x="center", anchor_y="center")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_button(self):\n # Draw the button's outline\n pg.draw.rect(self.screen, self.text_color, pg.Rect(self.rect.left - 1, self.rect.top - 1, self.rect.width + 2, self.rect.height + 2))\n\n # Draw the button\n pg.draw.rect(self.screen, self.button_color, self.rect)\n\n # Blit the button's text onto it\n self.screen.blit(self.txt_surface, self.txt_surface_rect)", "def draw_but(self, window):\n # draws the rectangular button\n p1 = graphics.Point(self.cen_point_x - self.width / 2, \n self.cen_point_y - self.height / 2)\n p2 = graphics.Point(self.cen_point_x + self.width / 2, \n self.cen_point_y + self.height / 2)\n self.button = graphics.Rectangle(p1, p2)\n self.button.setOutline(\"Orange\")\n self.button.draw(window)\n \n # draws the text on the button\n self.text.draw(window)", "def draw_button(self):\n self.screen.fill(self.button_color, self.rect)\n self.screen.blit(self.msg_img, self.msg_img_rect)", "def draw_button(self):\r\n self.surface.fill(self.button_color, self.rect)\r\n self.surface.blit(self.msg_image, self.msg_image_rect)", "def draw(self):\n if self.is_clicked:\n pg.draw.circle(self.window, self.color, (self.x, self.y), self.r, 0)\n else:\n pg.draw.circle(self.window, self.color, (self.x, self.y), self.r, 1)", "def button(self):\r\n t = turtle.Turtle()\r\n t.hideturtle()\r\n t.speed(20)\r\n t.penup()\r\n t.color(\"black\")\r\n # Draws one of the squares behind the \"scoreboard\"\r\n t.goto(70, 41)\r\n t.pendown()\r\n t.begin_fill()\r\n for i in range(4):\r\n t.forward(100)\r\n t.left(90)\r\n t.end_fill()\r\n t.penup()\r\n t.goto(70, 139)\r\n # Draws one of the squares over a button up arrow\r\n t.color(\"#20b2aa\") # Turns the color to teal\r\n t.pendown()\r\n t.begin_fill()\r\n for y in range(4):\r\n t.forward(100)\r\n t.left(90)\r\n t.end_fill()\r\n t.penup()\r\n\r\n t.goto(190, 40)\r\n # Draws another one of the square around the enter button\r\n t.color(\"#20b2aa\") # Turns the color to teal\r\n t.pendown()\r\n t.begin_fill()\r\n for y in range(4):\r\n t.forward(100)\r\n t.left(90)\r\n t.end_fill()\r\n\r\n t.penup()\r\n t.goto(70, -59)\r\n t.color(\"#20b2aa\") # Turns the color to teal\r\n t.pendown()\r\n # Draws the box around the down button\r\n t.begin_fill()\r\n for y in range(4):\r\n t.forward(100)\r\n t.left(90)\r\n t.end_fill()\r\n # Draws the up arrow of the button\r\n t.penup()\r\n t.goto(70,143)\r\n t.pendown()\r\n t.color(\"#8b8378\") # Turns the color a light grey\r\n t.begin_fill()\r\n for y in range(3):\r\n t.pendown()\r\n t.forward(100)\r\n t.left(120)\r\n t.end_fill()\r\n # Draws the down arrow of the button\r\n t.penup()\r\n t.goto(70, 40)\r\n t.pendown()\r\n t.begin_fill()\r\n for y in range(3):\r\n t.forward(100)\r\n t.right(120)\r\n t.end_fill()\r\n # Draws scoreboard\r\n t.penup()\r\n t.goto(75, 136)\r\n t.color(\"white\")\r\n t.pendown()\r\n t.begin_fill()\r\n for y in range(4):\r\n t.forward(90)\r\n t.right(90)\r\n t.end_fill()\r\n t.color(\"black\")\r\n t.penup()\r\n t.goto(90,35)\r\n t.pendown()\r\n t.write(\"1\", font=(\"Arial\", 75, \"normal\") )\r\n t.color(\"#8b8378\") # Turns the color a light grey\r\n t.penup()\r\n # Draws the circle for the enter button and writes \"Enter\" on the button\r\n t.goto(240,50)\r\n t.begin_fill()\r\n t.circle(40)\r\n t.end_fill()\r\n t.penup()\r\n t.color(\"white\")\r\n t.goto(210,75)\r\n t.write(\"Enter\", font= (\"Arial\", 20, \"normal\"))\r\n t.color(\"white\")\r\n # Writes \"The Game of Nim\" at the bottom of the screen\r\n t.penup()\r\n t.goto(30, -140)\r\n t.pendown()\r\n t.write(\"The Game \", 
font=(\"Arial\", 40, \"normal\"))\r\n t.penup()\r\n t.goto(110, -185)\r\n t.write(\"of\", font = (\"Arial\", 40, \"normal\"))\r\n t.goto(70, -245)\r\n t.write(\"Nim\", font = (\"Arial\", 50, \"normal\"))", "def DrawPaneButton(self, dc, window, button, button_state, _rect, pane): \r\n \r\n if not pane:\r\n return\r\n \r\n if button == AUI_BUTTON_CLOSE:\r\n if pane.state & optionActive:\r\n bmp = self._active_close_bitmap\r\n else:\r\n bmp = self._inactive_close_bitmap\r\n\r\n elif button == AUI_BUTTON_PIN:\r\n if pane.state & optionActive:\r\n bmp = self._active_pin_bitmap\r\n else:\r\n bmp = self._inactive_pin_bitmap\r\n\r\n elif button == AUI_BUTTON_MAXIMIZE_RESTORE:\r\n if pane.IsMaximized():\r\n if pane.state & optionActive:\r\n bmp = self._active_restore_bitmap\r\n else:\r\n bmp = self._inactive_restore_bitmap\r\n else:\r\n if pane.state & optionActive:\r\n bmp = self._active_maximize_bitmap\r\n else:\r\n bmp = self._inactive_maximize_bitmap\r\n\r\n elif button == AUI_BUTTON_MINIMIZE:\r\n if pane.state & optionActive:\r\n bmp = self._active_minimize_bitmap\r\n else:\r\n bmp = self._inactive_minimize_bitmap\r\n\r\n isVertical = pane.HasCaptionLeft()\r\n \r\n rect = wx.Rect(*_rect)\r\n\r\n if isVertical:\r\n old_x = rect.x\r\n rect.x = rect.x + (rect.width/2) - (bmp.GetWidth()/2)\r\n rect.width = old_x + rect.width - rect.x - 1\r\n else:\r\n old_y = rect.y\r\n rect.y = rect.y + (rect.height/2) - (bmp.GetHeight()/2)\r\n rect.height = old_y + rect.height - rect.y - 1\r\n\r\n if button_state == AUI_BUTTON_STATE_PRESSED:\r\n rect.x += 1\r\n rect.y += 1\r\n\r\n if button_state in [AUI_BUTTON_STATE_HOVER, AUI_BUTTON_STATE_PRESSED]:\r\n\r\n if pane.state & optionActive:\r\n\r\n dc.SetBrush(wx.Brush(StepColour(self._active_caption_colour, 120)))\r\n dc.SetPen(wx.Pen(StepColour(self._active_caption_colour, 70)))\r\n\r\n else:\r\n\r\n dc.SetBrush(wx.Brush(StepColour(self._inactive_caption_colour, 120)))\r\n dc.SetPen(wx.Pen(StepColour(self._inactive_caption_colour, 70)))\r\n\r\n if wx.Platform != \"__WXMAC__\":\r\n # draw the background behind the button\r\n dc.DrawRectangle(rect.x, rect.y, 15, 15)\r\n else:\r\n # Darker the bitmap a bit\r\n bmp = DarkenBitmap(bmp, self._active_caption_colour, StepColour(self._active_caption_colour, 110))\r\n\r\n if isVertical:\r\n bmp = wx.ImageFromBitmap(bmp).Rotate90(clockwise=False).ConvertToBitmap()\r\n \r\n # draw the button itself\r\n dc.DrawBitmap(bmp, rect.x, rect.y, True)", "def drawCells(self):\r\n self.drawing = not self.drawing\r\n if self.drawing:\r\n self.draw_button['text'] = \"No Draw\"\r\n else:\r\n self.draw_button['text'] = \"Draw\"", "def DrawPaneButton(self, dc, part, pt):\r\n\r\n if not self.IsPaneButtonVisible(part):\r\n return\r\n\r\n state = AUI_BUTTON_STATE_NORMAL\r\n\r\n if part.rect.Contains(pt):\r\n\r\n if _VERSION_STRING < \"2.9\":\r\n leftDown = wx.GetMouseState().LeftDown()\r\n else:\r\n leftDown = wx.GetMouseState().LeftIsDown()\r\n\r\n if leftDown:\r\n state = AUI_BUTTON_STATE_PRESSED\r\n else:\r\n state = AUI_BUTTON_STATE_HOVER\r\n\r\n self._art.DrawPaneButton(dc, self._frame, part.button.button_id,\r\n state, part.rect, part.pane)", "def DrawButton(self, dc, wnd, item, rect):\r\n\r\n bmp_rect, text_rect = self.GetToolsPosition(dc, item, rect)\r\n \r\n if not item.GetState() & AUI_BUTTON_STATE_DISABLED:\r\n \r\n if item.GetState() & AUI_BUTTON_STATE_PRESSED:\r\n \r\n dc.SetPen(wx.Pen(self._highlight_colour))\r\n dc.SetBrush(wx.Brush(StepColour(self._highlight_colour, 150)))\r\n dc.DrawRectangleRect(rect)\r\n \r\n elif 
item.GetState() & AUI_BUTTON_STATE_HOVER or item.IsSticky():\r\n \r\n dc.SetPen(wx.Pen(self._highlight_colour))\r\n dc.SetBrush(wx.Brush(StepColour(self._highlight_colour, 170)))\r\n\r\n # draw an even lighter background for checked item hovers (since\r\n # the hover background is the same colour as the check background)\r\n if item.GetState() & AUI_BUTTON_STATE_CHECKED:\r\n dc.SetBrush(wx.Brush(StepColour(self._highlight_colour, 180)))\r\n\r\n dc.DrawRectangleRect(rect)\r\n \r\n elif item.GetState() & AUI_BUTTON_STATE_CHECKED:\r\n \r\n # it's important to put this code in an else statment after the\r\n # hover, otherwise hovers won't draw properly for checked items\r\n dc.SetPen(wx.Pen(self._highlight_colour))\r\n dc.SetBrush(wx.Brush(StepColour(self._highlight_colour, 170)))\r\n dc.DrawRectangleRect(rect)\r\n \r\n if item.GetState() & AUI_BUTTON_STATE_DISABLED:\r\n bmp = item.GetDisabledBitmap()\r\n else:\r\n bmp = item.GetBitmap()\r\n\r\n if bmp.IsOk():\r\n dc.DrawBitmap(bmp, bmp_rect.x, bmp_rect.y, True)\r\n\r\n # set the item's text colour based on if it is disabled\r\n dc.SetTextForeground(wx.BLACK)\r\n if item.GetState() & AUI_BUTTON_STATE_DISABLED:\r\n dc.SetTextForeground(DISABLED_TEXT_COLOUR)\r\n\r\n if self._agwFlags & AUI_TB_TEXT and item.GetLabel() != \"\":\r\n self.DrawLabel(dc, wnd, item, text_rect)", "def add_selection(self, coord):\n button = self.grid[coord]\n button['bg'] = active\n button['activebackground'] = active", "def show_buttons(self):\n for button in self.buttons:\n x = button.starting_x\n y = button.starting_y\n self.screen.fill(button.color, ((x, y), (button.width, button.height)))", "def button(win, text, x, y, w, h, c, hc, action=None):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n if x+w > mouse[0] > x and y+h > mouse[1] > y:\n pygame.draw.rect(win,hc,(x,y,w,h))\n\n if click[0] == 1 and action != None:\n action()\n else:\n pygame.draw.rect(win,c,(x,y,w,h))\n\n TextSurf, TextRect = textObject(text, rs.smallText, rs.black)\n TextRect.center = ((x+(w/2)),(y+(h/2)))\n win.blit(TextSurf, TextRect)", "def DrawPushButton(*args, **kwargs):\n return _gdi_.RendererNative_DrawPushButton(*args, **kwargs)", "def draw_single_button(self, color, position):\n\n self.pygame.draw.ellipse(self.gameDisplay, color, position)", "def paint_project_button(self, running):\r\n if running:\r\n self.btn_start.setIcon(QIcon(\r\n os.path.abspath(os.path.join(self.app.path, \"PySkeletonViewer\", \"images\", \"red_btn.png\"))))\r\n else:\r\n self.btn_start.setIcon(QIcon(\r\n os.path.abspath(os.path.join(self.app.path, \"PySkeletonViewer\", \"images\", \"green_btn.png\"))))\r\n self.btn_start.setIconSize(QSize(\r\n self.btn_start.width(), self.btn_start.height()))", "def draw(self):\n if not self.pressed:\n #draw info prompt in room\n arcade.draw_rectangle_filled(self.center_x, self.center_y, 20, 20, arcade.color.ANTIQUE_BRASS)\n arcade.draw_text(\"?\", self.center_x, self.center_y, arcade.color.BLACK, anchor_x=\"center\", anchor_y=\"center\")\n else:\n #draw info to top of screen when clicked\n arcade.draw_text(self.text, 10, settings.HEIGHT - 10, arcade.color.BLACK, anchor_x=\"left\", anchor_y=\"top\")", "def show(self):\r\n stroke(0) # determine the color\r\n circle((self.position.x, self.position.y), radius=10) # creates a circle with defined radius\r", "def draw(self):\n if not self.pressed:\n #draw dialogue prompt\n arcade.draw_rectangle_filled(self.center_x, self.center_y, 20, 20, arcade.color.ALABAMA_CRIMSON)\n arcade.draw_text(\"!\", self.center_x, 
self.center_y, arcade.color.BLACK, anchor_x=\"center\", anchor_y=\"center\")\n else:\n #draw dialogue box\n arcade.draw_rectangle_filled(self.center_x, self.center_y, self.width, self.height, self.color)\n arcade.draw_text(self.text, self.center_x, self.center_y, arcade.color.BLACK, anchor_x=\"center\", anchor_y=\"center\")", "def DrawButton(self, dc, wnd, in_rect, button, orientation):\r\n\r\n bitmap_id, button_state = button.id, button.cur_state\r\n \r\n if bitmap_id == AUI_BUTTON_CLOSE:\r\n if button_state & AUI_BUTTON_STATE_DISABLED:\r\n bmp = self._disabled_close_bmp\r\n elif button_state & AUI_BUTTON_STATE_HOVER:\r\n bmp = self._hover_close_bmp\r\n elif button_state & AUI_BUTTON_STATE_PRESSED:\r\n bmp = self._pressed_close_bmp\r\n else:\r\n bmp = self._active_close_bmp\r\n\r\n elif bitmap_id == AUI_BUTTON_LEFT:\r\n if button_state & AUI_BUTTON_STATE_DISABLED:\r\n bmp = self._disabled_left_bmp\r\n else:\r\n bmp = self._active_left_bmp\r\n\r\n elif bitmap_id == AUI_BUTTON_RIGHT:\r\n if button_state & AUI_BUTTON_STATE_DISABLED:\r\n bmp = self._disabled_right_bmp\r\n else:\r\n bmp = self._active_right_bmp\r\n\r\n elif bitmap_id == AUI_BUTTON_WINDOWLIST:\r\n if button_state & AUI_BUTTON_STATE_DISABLED:\r\n bmp = self._disabled_windowlist_bmp\r\n else:\r\n bmp = self._active_windowlist_bmp\r\n\r\n else:\r\n if button_state & AUI_BUTTON_STATE_DISABLED:\r\n bmp = button.dis_bitmap\r\n else:\r\n bmp = button.bitmap\r\n \r\n if not bmp.IsOk():\r\n return\r\n\r\n rect = wx.Rect(*in_rect)\r\n\r\n if orientation == wx.LEFT:\r\n \r\n rect.SetX(in_rect.x)\r\n rect.SetY(((in_rect.y + in_rect.height)/2) - (bmp.GetHeight()/2))\r\n rect.SetWidth(bmp.GetWidth())\r\n rect.SetHeight(bmp.GetHeight())\r\n \r\n else:\r\n \r\n rect = wx.Rect(in_rect.x + in_rect.width - bmp.GetWidth(),\r\n ((in_rect.y + in_rect.height)/2) - (bmp.GetHeight()/2),\r\n bmp.GetWidth(), bmp.GetHeight())\r\n \r\n rect = IndentPressedBitmap(rect, button_state)\r\n dc.DrawBitmap(bmp, rect.x, rect.y, True)\r\n\r\n out_rect = rect\r\n\r\n if bitmap_id == AUI_BUTTON_RIGHT:\r\n self._buttonRect = wx.Rect(rect.x, rect.y, 30, rect.height)\r\n \r\n return out_rect", "def draw(self):\n # static\n surf = self.surf.copy()\n\n # dynamic\n pos = (10+int((self.val-self.mini)/(self.maxi-self.mini)*130), 40)\n self.button_rect = self.button_surf.get_rect(center=pos)\n surf.blit(self.button_surf, self.button_rect)\n # move of button box to correct screen position\n self.button_rect.move_ip(self.xpos, self.ypos)\n\n # screen\n screen.blit(surf, (self.xpos, self.ypos))", "def drawButtons(self):\n self.__pausedTitle.draw(self.__screen)\n self.__exitGameButton.draw(self.__screen)\n self.__resumeButton.draw(self.__screen)\n self.__mainMenuButton.draw(self.__screen)", "def draw(self, surface):\n checked_color = (0, 196, 0) if self.checked else pg.Color(\"white\")\n surface.fill(pg.Color(\"black\"), self.rect)\n surface.fill(self.color, self.rect.inflate(-2,-2))\n surface.fill(pg.Color(\"white\"), self.rect.inflate(-6,-6))\n surface.fill((205,205,205), self.rect.inflate(-8,-8))\n surface.fill(checked_color, self.select_rect)", "def display(self):\n with push_style():\n fill(255)\n circle((self.xoff + self.x, self.yoff + self.y), 6, mode=\"CENTER\")", "def show(self):\n stroke(*self.status.value)\n fill(*self.status.value)\n circle((self.position.x, self.position.y), radius = 7)", "def create_play_button(self):\n play_button = Button(self.littleFrame, text=\"Rejouer\", font=(\"Arial\", 25), bg='white', relief='groove',\n fg='lightblue',\n 
command=self.start_game, width=8, activebackground='white',\n activeforeground='lightblue')\n play_button.grid(column=0, row=0)\n invisible_widget = Label(self.littleFrame, text=\" \", bg=\"lightblue\")\n invisible_widget.grid(column=1, row=0)", "def button(msg, font_size, x, y, w, h, color, action):\r\n mouse = pygame.mouse.get_pos() # Grabbing cursor position\r\n click = pygame.mouse.get_pressed() # Mouse button status\r\n \r\n # Check if cursor is on the button\r\n if x + w > mouse[0] > x and y + h > mouse[1] > y:\r\n # Draw the button\r\n pygame.draw.rect(display, color, (x, y, w, h)) \r\n \r\n # Check if we have clicked on the button\r\n if click[0] == 1 and action is not None:\r\n \r\n # Run singleplayer mode\r\n if action == \"Play S\": \r\n mode = \"singleplayer\" # set mode\r\n ctf.ctf_game(mode, selected_map)\r\n \r\n # Run multiplayer mode\r\n if action == \"Play M\":\r\n mode = \"multiplayer\" # set mode\r\n ctf.ctf_game(mode, selected_map)\r\n \r\n # Quit\r\n if action == \"Quit\":\r\n pygame.quit()\r\n quit()\r\n \r\n # Demo\r\n if action == \"Demo\":\r\n mode = \"demo\"\r\n ctf.ctf_game(mode, selected_map)\r\n \r\n # set display\r\n pygame.display.set_mode((display_width, display_height), pygame.RESIZABLE)\r\n \r\n # Displaying text on the button\r\n font = pygame.font.Font('freesansbold.ttf', font_size)\r\n text_surf, text_rect = text_objects(msg, font)\r\n text_rect.center = ((x+(w/2)), (y+(h/2)))\r\n display.blit(text_surf, text_rect)", "def draw(self, win, outline=None):\n # Call this method to draw the button on the screen\n if outline:\n pygame.draw.rect(win, outline, (self.x - 2, self.y - 2, self.width + 4, self.height + 4), 0)\n\n pygame.draw.rect(win, self.color, (self.x, self.y, self.width, self.height), 0)\n\n if self.text != '':\n font = pygame.font.SysFont('comicsans', 30)\n text = font.render(self.text, 1, black)\n win.blit(text, (self.x + (self.width // 2 - text.get_width() // 2), self.y + (self.height // 2 - text.get_height() // 2)))", "def OnDrawGTKStyle(self, event):\r\n\r\n dc = wx.AutoBufferedPaintDC(self._pButton)\r\n dc.SetBackground(wx.Brush(self.GetBackgroundColour()))\r\n dc.Clear()\r\n \r\n self.OnDrawGTKExpander(dc)\r\n self.OnDrawGTKText(dc)", "def _draw_square(self, left_x, top_y, side, color, fill):\n self.pen.up()\n self.pen.color(color)\n self.pen.goto(left_x, top_y)\n self.pen.down()\n self.pen.begin_fill()\n for _ in range(4):\n self.pen.forward(side)\n self.pen.right(90)\n self.pen.end_fill()", "def DrawPaneButton(self, dc, window, button, button_state, rect, pane): \r\n\r\n if self.usingTheme:\r\n\r\n hTheme = self.hTheme1 \r\n \r\n # Get the real button position (compensating for borders)\r\n drect = wx.Rect(rect.x, rect.y, self._button_size, self._button_size)\r\n \r\n # Draw the themed close button\r\n rc = RECT(0, 0, 0, 0)\r\n if pane.HasCaptionLeft():\r\n rc.top = rect.x + self._button_border_size\r\n rc.left = int(rect.y + 1.5*self._button_border_size)\r\n rc.right = rect.x + self._button_size + self._button_border_size\r\n rc.bottom = int(rect.y + self._button_size + 1.5*self._button_border_size)\r\n else:\r\n rc.top = rect.x - self._button_border_size\r\n rc.left = int(rect.y + 1.5*self._button_border_size)\r\n rc.right = rect.x + self._button_size- self._button_border_size\r\n rc.bottom = int(rect.y + self._button_size + 1.5*self._button_border_size)\r\n\r\n if button == AUI_BUTTON_CLOSE:\r\n btntype = 19\r\n \r\n elif button == AUI_BUTTON_PIN:\r\n btntype = 23\r\n\r\n elif button == AUI_BUTTON_MAXIMIZE_RESTORE:\r\n if not 
pane.IsMaximized():\r\n btntype = 17\r\n else:\r\n btntype = 21\r\n else:\r\n btntype = 15\r\n\r\n state = 4 # CBS_DISABLED\r\n \r\n if pane.state & optionActive:\r\n\r\n if button_state == AUI_BUTTON_STATE_NORMAL:\r\n state = 1 # CBS_NORMAL\r\n\r\n elif button_state == AUI_BUTTON_STATE_HOVER:\r\n state = 2 # CBS_HOT\r\n\r\n elif button_state == AUI_BUTTON_STATE_PRESSED:\r\n state = 3 # CBS_PUSHED\r\n\r\n else:\r\n raise Exception(\"ERROR: Unknown State.\")\r\n\r\n else: # inactive pane\r\n\r\n if button_state == AUI_BUTTON_STATE_NORMAL:\r\n state = 5 # CBS_NORMAL\r\n\r\n elif button_state == AUI_BUTTON_STATE_HOVER:\r\n state = 6 # CBS_HOT\r\n\r\n elif button_state == AUI_BUTTON_STATE_PRESSED:\r\n state = 7 # CBS_PUSHED\r\n\r\n else:\r\n raise Exception(\"ERROR: Unknown State.\")\r\n\r\n try:\r\n winxptheme.DrawThemeBackground(hTheme, dc.GetHDC(), btntype, state, (rc.top, rc.left, rc.right, rc.bottom), None)\r\n except TypeError:\r\n return\r\n\r\n else:\r\n\r\n # Fallback to default closebutton if themes are not enabled\r\n rect2 = wx.Rect(rect.x-4, rect.y+2, rect.width, rect.height)\r\n AuiDefaultDockArt.DrawPaneButton(self, dc, window, button, button_state, rect2, pane)", "def draw_push_button(self, text, event_name, num_items = 1, item = 0):\n width = self.XCOLUMNSKIP//num_items\n self.guiElements[event_name] = Draw.PushButton(\n text,\n self.event_id(event_name),\n self.xPos + item*width, self.yPos, width, self.YLINESKIP)\n if item + 1 == num_items:\n self.yPos -= self.YLINESKIP", "def define_button(self):\n self.separator1 = pygame.Rect(\n 0,\n SCREEN_WIDTH,\n SCREEN_WIDTH,\n BIG_LINE_WIDTH,\n )\n self.separator2 = pygame.Rect(\n 0,\n SCREEN_WIDTH + BIG_LINE_WIDTH // 2,\n SCREEN_WIDTH,\n BIG_LINE_WIDTH,\n )\n\n self.button = pygame.Rect(\n SCREEN_WIDTH // 2 - BUTTON_WIDTH // 2,\n (SCREEN_HEIGHT + SCREEN_WIDTH) // 2 - BUTTON_HEIGHT // 2,\n BUTTON_WIDTH,\n BUTTON_HEIGHT,\n )", "def DrawButtons(self, dc, _rect, bmp, bkcolour, button_state):\r\n\r\n rect = wx.Rect(*_rect)\r\n\r\n if button_state == AUI_BUTTON_STATE_PRESSED:\r\n rect.x += 1\r\n rect.y += 1\r\n\r\n if button_state in [AUI_BUTTON_STATE_HOVER, AUI_BUTTON_STATE_PRESSED]:\r\n dc.SetBrush(wx.Brush(StepColour(bkcolour, 120)))\r\n dc.SetPen(wx.Pen(StepColour(bkcolour, 75)))\r\n\r\n # draw the background behind the button\r\n dc.DrawRectangle(rect.x, rect.y, 15, 15)\r\n\r\n # draw the button itself\r\n dc.DrawBitmap(bmp, rect.x, rect.y, True)", "def draw(self):\n self.screen.fill(WHITE)\n self.color_invalid()\n self.draw_selected()\n self.shade_locked_cells()\n self.draw_grid()\n self.draw_buttons()\n self.draw_numbers()", "def draw_buttons(self):\n for button in self.playing_buttons:\n button.draw(self.screen)", "def on_click(button):\n global ttt, choices, count, sym, result, x_pos, o_pos\n\n if count % 2 == 0:\n sym = \"X\"\n else:\n sym = \"O\"\n count += 1\n\n button.config(\n text=sym,\n state='disabled',\n disabledforeground=\"red\") # For cross\n\n x, y = get_coordinates(button)\n x += 1\n y += 1\n x_pos.append((x, y))\n state = gen_state(to_move='O', x_positions=x_pos,\n o_positions=o_pos)\n try:\n choice = choices.get()\n if \"Random\" in choice:\n a, b = random_player(ttt, state)\n elif \"Pro\" in choice:\n a, b = minimax_decision(state, ttt)\n else:\n a, b = alphabeta_player(ttt, state)\n except (ValueError, IndexError, TypeError) as e:\n disable_game()\n result.set(\"It's a draw :|\")\n return\n if 1 <= a <= 3 and 1 <= b <= 3:\n o_pos.append((a, b))\n button_to_change = get_button(a - 1, b - 1)\n if count % 2 == 0: # 
Used again, will become handy when user is given the choice of turn.\n sym = \"X\"\n else:\n sym = \"O\"\n count += 1\n\n if check_victory(button):\n result.set(\"You win :)\")\n disable_game()\n else:\n button_to_change.config(text=sym, state='disabled',\n disabledforeground=\"black\")\n if check_victory(button_to_change):\n result.set(\"You lose :(\")\n disable_game()", "def draw(self):\n self.strip.show()", "def cover_button(self):\r\n t = turtle.Turtle()\r\n t.hideturtle()\r\n t.speed(20)\r\n t.penup()\r\n t.goto(190,-260)\r\n t.setheading(0)\r\n t.color(\"#696969\")\r\n t.pendown()\r\n t.begin_fill()\r\n for y in range(2):\r\n t.forward(150)\r\n t.left(90)\r\n t.forward(70)\r\n t.left(90)\r\n t.end_fill()\r\n t.goto(0,0)", "def DrawOverflowButton(self, dc, wnd, rect, state):\r\n \r\n if state & AUI_BUTTON_STATE_HOVER or state & AUI_BUTTON_STATE_PRESSED:\r\n \r\n cli_rect = wnd.GetClientRect()\r\n light_gray_bg = StepColour(self._highlight_colour, 170)\r\n\r\n if self._agwFlags & AUI_TB_VERTICAL:\r\n \r\n dc.SetPen(wx.Pen(self._highlight_colour))\r\n dc.DrawLine(rect.x, rect.y, rect.x+rect.width, rect.y)\r\n dc.SetPen(wx.Pen(light_gray_bg))\r\n dc.SetBrush(wx.Brush(light_gray_bg))\r\n dc.DrawRectangle(rect.x, rect.y+1, rect.width, rect.height)\r\n \r\n else:\r\n \r\n dc.SetPen(wx.Pen(self._highlight_colour))\r\n dc.DrawLine(rect.x, rect.y, rect.x, rect.y+rect.height)\r\n dc.SetPen(wx.Pen(light_gray_bg))\r\n dc.SetBrush(wx.Brush(light_gray_bg))\r\n dc.DrawRectangle(rect.x+1, rect.y, rect.width, rect.height)\r\n \r\n x = rect.x + 1 + (rect.width-self._overflow_bmp.GetWidth())/2\r\n y = rect.y + 1 + (rect.height-self._overflow_bmp.GetHeight())/2\r\n dc.DrawBitmap(self._overflow_bmp, x, y, True)", "def draw(self, surface, offset=(0,0)):\n mouse = pg.mouse.get_pos()\n pos = mouse[0]-offset[0], mouse[1]-offset[1]\n if self.clicked:\n fill_color = pg.Color(\"white\")\n text = self.selected_text\n elif self.rect.collidepoint(pos):\n fill_color = (198, 226, 255)\n text = self.selected_text\n else:\n fill_color = self.color\n text = self.text\n surface.fill(pg.Color(\"black\"), self.rect)\n surface.fill(fill_color, self.rect.inflate(-2,-2))\n surface.blit(text, self.text_rect)", "def paint(self):\r\n self.win.bkgd(\" \", COLOR_PAIR[\"con_text\"])", "def draw_o(self):\r\n pen.down()\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.up()\r\n pen.forward(50)", "def draw(self):\n if self.master != None :\n outline = Cell.FILLED_COLOR_BORDER if self.fill else Cell.EMPTY_COLOR_BORDER\n\n xmin = self.abs * self.size\n xmax = xmin + self.size\n ymin = self.ord * self.size\n ymax = ymin + self.size\n\n self.master.create_rectangle(xmin, ymin, xmax, ymax, fill = self.fill, outline = outline)", "def DrawButton(self, dc, wnd, in_rect, button, orientation):\r\n\r\n bitmap_id, button_state = button.id, button.cur_state\r\n \r\n if bitmap_id == AUI_BUTTON_CLOSE:\r\n if button_state & AUI_BUTTON_STATE_DISABLED:\r\n bmp = self._disabled_close_bmp\r\n else:\r\n bmp = self._active_close_bmp\r\n\r\n elif bitmap_id == AUI_BUTTON_LEFT:\r\n if button_state & AUI_BUTTON_STATE_DISABLED:\r\n bmp = self._disabled_left_bmp\r\n else:\r\n bmp = self._active_left_bmp\r\n\r\n elif bitmap_id == AUI_BUTTON_RIGHT:\r\n if button_state & AUI_BUTTON_STATE_DISABLED:\r\n bmp = self._disabled_right_bmp\r\n else:\r\n bmp = self._active_right_bmp\r\n\r\n elif bitmap_id == AUI_BUTTON_WINDOWLIST:\r\n if button_state & 
AUI_BUTTON_STATE_DISABLED:\r\n bmp = self._disabled_windowlist_bmp\r\n else:\r\n bmp = self._active_windowlist_bmp\r\n\r\n else:\r\n if button_state & AUI_BUTTON_STATE_DISABLED:\r\n bmp = button.dis_bitmap\r\n else:\r\n bmp = button.bitmap\r\n \r\n if not bmp.IsOk():\r\n return\r\n\r\n rect = wx.Rect(*in_rect)\r\n\r\n if orientation == wx.LEFT:\r\n \r\n rect.SetX(in_rect.x)\r\n rect.SetY(((in_rect.y + in_rect.height)/2) - (bmp.GetHeight()/2))\r\n rect.SetWidth(bmp.GetWidth())\r\n rect.SetHeight(bmp.GetHeight())\r\n \r\n else:\r\n \r\n rect = wx.Rect(in_rect.x + in_rect.width - bmp.GetWidth(),\r\n ((in_rect.y + in_rect.height)/2) - (bmp.GetHeight()/2),\r\n bmp.GetWidth(), bmp.GetHeight())\r\n\r\n self.DrawButtons(dc, rect, bmp, wx.WHITE, button_state)\r\n\r\n out_rect = wx.Rect(*rect)\r\n return out_rect", "def boutton(self,img1,x,y):\r\n self.button.append(self.creat_image(img1,x,y))", "def remove_selection(self, coord):\n button = self.grid[coord]\n button['bg'] = default_color\n button['activebackground'] = '#38dcf5'", "def colour_press(self):\n global last_button\n if last_button is None:\n # If there is no \"last button press\", set this as the latest one\n last_button = self\n else:\n # Another button has been pressed before. Switch the colours of the two\n last_button.background_color, self.background_color = self.background_color, last_button.background_color\n # Set their states back to normal and reset the last button pressed\n last_button.state = 'normal'\n self.state = 'normal'\n last_button = None\n # Check if the switch removed any blocks\n points = self.screen.check_removal()\n if points == 0:\n # If nothing has been removed, the player gets one step closer to losing\n self.screen.misses += 1\n else:\n # Give the player the points\n self.screen.points += points\n if self.screen.misses > 3:\n # Player has lost, leave the game\n self.screen.leave()", "def create_quit_button(self):\n quit_button = Button(self.littleFrame, text=\"Quitter\", font=(\"Arial\", 25), bg='white', relief='groove',\n fg='light blue',\n command=self.leave_page, width=8, activebackground='red',\n activeforeground='black')\n quit_button.grid(column=2, row=0)", "def draw_menu(self):\n self.screen.fill(self.menu_color, self.rect)\n pygame.draw.rect(self.screen, self.border_color, self.rect, 5)\n self.screen.blit(self.title_image, self.title_image_rect)\n\n self.play_button.draw_button()", "def draw_instruction():\r\n arcade.draw_text(\r\n \"This is a game of Santa, Reindeer, Snowman\", 0, 50, arcade.color.WHITE, 15\r\n )\r\n arcade.draw_text(\r\n \"Santa beats snowman, snowman beats reindeer, reindeer beats santa\",\r\n 0,\r\n 30,\r\n arcade.color.WHITE,\r\n 13,\r\n )\r\n arcade.draw_text(\r\n \"Press button 1 for santa, 2 for reindeer, and 3 for snowman\",\r\n 0,\r\n 10,\r\n arcade.color.WHITE,\r\n 15,\r\n )\r\n arcade.draw_text(\r\n \"User Choice\", WINDOW_WIDTH - 175, WINDOW_HEIGHT - 60, arcade.color.WHITE, 15\r\n )\r\n arcade.draw_text(\"CPU Choice\", 75, WINDOW_HEIGHT - 60, arcade.color.WHITE, 15)", "def draw(self):\n if self.master != None :\n fill = self.fill\n #fill = Cell.FILLED_COLOR_BG\n outline = Cell.EMPTY_COLOR_BORDER\n\n #if not self.fill:\n # fill = Cell.EMPTY_COLOR_BG\n # outline = Cell.EMPTY_COLOR_BORDER\n\n xmin = self.abs * self.size\n xmax = xmin + self.size\n ymin = self.ord * self.size\n ymax = ymin + self.size\n\n self.master.create_rectangle(xmin, ymin, xmax, ymax, fill = fill, outline = outline)", "def draw(self):\n if context.click():\n self.place()", "def triangleBtnHandler(val):\n 
if val == 1 :\n print(\"Triangle button pressed\")\n else:\n print(\"Triangle button released\")", "def button(msg,x,y,w,h,ic,ac,action=None): # dit is de function die een button aanmaakt (text,x,y,width,height,kleur, hover kleur, actie)\r\n mouse = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n if x+w > mouse[0] > x and y+h > mouse[1] > y: #als de muis over de knop hovert, verander de kleur\r\n pygame.draw.rect(gameDisplay, ac,(x,y,w,h))\r\n if click[0] == 1 and action != None: #als je er op klikt, doe actie\r\n action()\r\n else:\r\n pygame.draw.rect(gameDisplay, ic,(x,y,w,h))\r\n smallText = pygame.font.SysFont(\"freesansbold.ttf\",20)\r\n textSurf, textRect = text_objects(msg, smallText)\r\n textRect.center = ( (x+(w/2)), (y+(h/2)) )\r\n gameDisplay.blit(textSurf, textRect)", "def draw_open(x, y):\n square_pos_x = x * 30\n square_pos_y = (y - 1) * -30\n penup()\n pencolor('#ff9800')\n # Sets the position on the position (15, 25) in the square of size (30,30) and draws a filled circle\n setpos(-500 + square_pos_x + 15, 200 + square_pos_y - 25)\n pendown()\n circle(10)", "def _draw_indicator(\n self, src, center, color=(255, 0, 0), shape=\"circle\", size=4, thickness=1\n ):\n if isinstance(center, tuple):\n center = new_point(*center)\n if shape == \"rect\":\n draw_rectangle(\n src,\n center.x - size / 2.0,\n center.y - size / 2.0,\n size,\n size,\n color=color,\n thickness=thickness,\n )\n elif shape == \"crosshairs\":\n draw_lines(\n src,\n [\n [(center.x - size, center.y), (center.x + size, center.y)],\n [(center.x, center.y - size), (center.x, center.y + size)],\n ],\n color=color,\n thickness=thickness,\n )\n else:\n draw_circle(src, center[0], center[1], size, color=color)", "def drawWidget(self, qp):\n # Prepare brush.\n brush = QtGui.QBrush()\n brush.setStyle(Qt.SolidPattern)\n if self.is_selected():\n # Fill selected circle with dimmed color\n brush.setColor(self.color_dimmed)\n else:\n brush.setColor(self.parentWidget().BACKGROUND_COLOR)\n qp.setBrush(brush)\n\n # Prepare pen.\n pen = QtGui.QPen()\n pen.setColor(self.color)\n pen.setWidth(2);\n qp.setPen(pen)\n\n size = self.size()\n w = size.width()\n h = size.height()\n center = QPoint(w // 2, h // 2)\n radius = min(w, h) // 2 - 2\n \n qp.drawEllipse(center, radius, radius)", "def paintButtons(self):\n\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(BUTTON_OK)\n buttonOK = guiobjects.OcempImageButtonTransparent(imgPath, self.buttonTooltips[\"ok\"], self.showTooltip, self.removeTooltip)\n buttonOK.topleft = [770, 30]\n buttonOK.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.changeConfiguration)\n self.window.add_child(buttonOK)\n\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(BUTTON_CANCEL)\n buttonCancel = guiobjects.OcempImageButtonTransparent(imgPath, self.buttonTooltips[\"cancel\"], self.showTooltip, self.removeTooltip)\n buttonCancel.topleft = [890, 30]\n buttonCancel.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.closeConfiguration)\n self.window.add_child(buttonCancel)", "def draw():", "def init_draw_button(self):\n def draw():\n \"\"\"\n Invoke draw function of fractal class of the parent\n with suitable arguments\n \"\"\"\n recursion_depth = self.get_recursion_depth()\n base_length = self.get_base_length()\n self.parent_class.classes[\"fractal\"].set_base_length(base_length)\n is_curved = self.vars[\"round_corners\"].get()\n fill_color = self.vars[\"fill_color\"].get()\n self.parent_class.classes[\"fractal\"].draw_fractal(\n recursion_depth, 
is_curved, fill_color)\n\n self.buttons[\"btn_draw\"] = Button(\n self.frame, width=14, text=\"Draw Fractal\", command=draw)\n self.buttons[\"btn_draw\"].grid(row=3, column=0)", "def buttonbox(self):\n self.ok_button = tk.Button(\n self, text=\"OK\", width=5, command=lambda: self.destroy()\n )\n self.ok_button.pack(pady=10)", "def IndentPressedBitmap(rect, button_state):\r\n\r\n if button_state == AUI_BUTTON_STATE_PRESSED:\r\n rect.x += 1\r\n rect.y += 1\r\n\r\n return rect", "def draw(self, surface, offset=(0,0)):\n for button in self.buttons:\n button.draw(surface, offset)", "def eventHandler(self, event: pygame.event):\n # change selected color if this button's rectangle was clicked\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n if self.rect.collidepoint(event.pos): # is mouse over button\n self.image = self._images[ButtonImages.CLICKING_IMAGE.value]\n self.beingClicked = True\n for func, *args in self.functionsToInvokeWhenClicked:\n func(*args)\n elif event.type == pygame.MOUSEBUTTONUP and self.beingClicked:\n if event.button == 1:\n self.beingClicked = False\n self.image = self._images[ButtonImages.DEFAULT_IMAGE.value]", "def create_button(self, starting_pixel: Tuple[int, int], width: int, height: int, color: int, text: str):\n self.buttons.append(Button(starting_pixel, width, height, color, text))", "def sprint(self):\n self.buttons = []\n self.screen.blit(self.background_image, (0, 0))\n self.create_button((self.width // 2 - 257, self.height // 8 - 85), 501, 200, Colors.BLACK, \"20L\")\n self.create_button((self.width // 2 - 257, self.height // 8 * 3 - 81), 501, 200, Colors.BLACK, \"40L\")\n self.create_button((self.width // 2 - 257, self.height // 8 * 5 - 86), 501, 200, Colors.BLACK, \"100L\")\n self.create_button((self.width // 2 - 257, self.height // 8 * 7 - 85), 501, 200, Colors.BLACK, \"1000L\")\n self.show_buttons()\n self.show_text_in_buttons()\n pygame.display.flip()", "def update_reset_button(self):\r\n if self.board.hovered_tiles and self.is_left_mouse_down:\r\n self.reset_button.draw_uhoh()\r\n else:\r\n self.reset_button.draw_smiley()", "def draw(self):\n if self.visible:\n glColor3f(self.r, self.g, self.b)\n graphicsBall(self.x, self.y, self.radius)\n\n if self.number <= 8:\n glColor3f(1.0, 1.0, 1.0)\n else:\n glColor3f(0.0, 0.0, 0.0)\n\n graphicsBall(self.x, self.y, self.radius / 2)\n\n if self.number > 0:\n if self.number > 8:\n glColor3f(1.0, 1.0, 1.0)\n else:\n glColor3f(0.0, 0.0, 0.0)\n\n if self.number < 10:\n graphicsText(self.x - 2, self.y - 3.5, str(self.number))\n else:\n graphicsText(self.x - 4.5, self.y - 3.5, str(self.number))", "def check_button_hover(coord, play_button, high_scores_button):\r\n x = coord[0]\r\n y = coord[1]\r\n play_x = (play_button.rect.x <= x <= play_button.rect.x + play_button.width)\r\n play_y = (play_button.rect.y <= y <= play_button.rect.y + play_button.height)\r\n scores_x = (high_scores_button.rect.x <= x <= high_scores_button.rect.x + high_scores_button.width)\r\n scores_y = (high_scores_button.rect.y <= y <= high_scores_button.rect.y + high_scores_button.height)\r\n if play_x and play_y:\r\n play_button.text_color = (0, 255, 0)\r\n else:\r\n play_button.text_color = (255, 255, 255)\r\n\r\n play_button.prep_msg()\r\n play_button.draw_button()\r\n\r\n if scores_x and scores_y:\r\n high_scores_button.text_color = (0, 255, 0)\r\n else:\r\n high_scores_button.text_color = (255, 255, 255)\r\n\r\n high_scores_button.prep_msg()\r\n high_scores_button.draw_button()", "def set_active_tool_button(self, 
active_button):\n\n # button_style = 'font-weight: bold'\n # active_style = \"background-color: blue; color: white\"\n # active_style = \"background-color: rgb(0,49,80); color: white\"\n active_style = \"background-color: rgb(0,112,192); color: white\"\n inactive_style = \"background-color: none; color: none\"\n\n # Reset all button colours\n self.projConfigButton.setStyleSheet(inactive_style)\n self.rawDataButton.setStyleSheet(inactive_style)\n self.dataQualityButton.setStyleSheet(inactive_style)\n self.statsScreeningButton.setStyleSheet(inactive_style)\n self.spectralScreeningButton.setStyleSheet(inactive_style)\n self.histogramsButton.setStyleSheet(inactive_style)\n self.seascatterButton.setStyleSheet(inactive_style)\n self.transFuncsButton.setStyleSheet(inactive_style)\n self.fatigueButton.setStyleSheet(inactive_style)\n\n # Colour active dashboard button\n if active_button == \"config\":\n self.projConfigButton.setStyleSheet(active_style)\n if active_button == \"raw\":\n self.rawDataButton.setStyleSheet(active_style)\n if active_button == \"quality\":\n self.dataQualityButton.setStyleSheet(active_style)\n if active_button == \"stats\":\n self.statsScreeningButton.setStyleSheet(active_style)\n if active_button == \"spectral\":\n self.spectralScreeningButton.setStyleSheet(active_style)\n if active_button == \"histograms\":\n self.histogramsButton.setStyleSheet(active_style)\n if active_button == \"seascatter\":\n self.seascatterButton.setStyleSheet(active_style)\n if active_button == \"tf\":\n self.transFuncsButton.setStyleSheet(active_style)\n if active_button == \"fatigue\":\n self.fatigueButton.setStyleSheet(active_style)", "def draw_s(self):\r\n pen.down()\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(20)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(50)", "def change_start_button(event):\n img_start_button_mouse_over = PhotoImage(\n file=r\"C:\\Users\\Owner\\PycharmProjects\\Module14\\buttons\\start_new_game_raised_active.png\")\n lbl_start_game.config(image=img_start_button_mouse_over)\n lbl_start_game.image = img_start_button_mouse_over\n lbl_start_game.grid(row=8, column=1, columnspan=8, pady=6)", "def rectan_button(msg,x,y,w=100,h=100,ic=green,ac=green_bright,action=None,size=20,font='freesansbold.ttf'):\n mouse = pygame.mouse.get_pos() #pobiera pozycje myszki i zwraca x w mouse[0] i y w mouse[1]\n click = pygame.mouse.get_pressed() # click[0] lewy, click[1] srodkowy , click[2] prawy przycisk myszy \n \n #print(mouse)\n a = (x+w > mouse[0] and x < mouse[0] and y+h>mouse[1] and y < mouse[1]) #warunek na to , czy pozycja myszki jest w prostokacie przycisku\n if a: \n pygame.draw.rect(gameDisplay,ac,(x,y,w,h)) #rysuje jasniejszy prostokąt, wydaje sie ze podswietlony, gdy myszka na nim.\n \n if click[0]==1 and action!=None:\n #sleep zeby sie nie wcisnely 2 przyciski jak np. wychodzisz z opcji, a w miejscu przycisku 'back' w glownym menu jest 'start'\n time.sleep(0.1)\n action() \n else:\n pygame.draw.rect(gameDisplay,ic,(x,y,w,h)) #rysuje ciemny prostokat, jesli a nie jest prawdą\n \n\n # tutaj tworzy sie napis na srodku ekranu. 
\n # mozna dorzucic opcje wyboru \n textfont = pygame.font.Font('freesansbold.ttf',20)\n textsurf,textrect = text_objects(msg,textfont,black)\n textrect.center = ((x+(w/2)),(y+(h/2)))\n gameDisplay.blit(textsurf,textrect)", "def draw_sound_button(self):\n if self.settings.sound_on:\n self.screen.blit(self.image_sound_on, self.rect)\n else:\n self.screen.blit(self.image_sound_off, self.rect)", "def create_quit_button(self):\n quit_button = Button(self.littleFrame, text=\"Quitter\", font=(\"Arial\", 25), bg='white', relief='groove',\n fg='lightblue',\n command=self.leave_page, width=8, activebackground='white',\n activeforeground='lightblue')\n quit_button.grid(column=2, row=0)", "def draw(self, screen):\r\n if self.selected:\r\n used_color = (255 - self.color[0], 255 - self.color[1], 255 - self.color[2])\r\n else:\r\n used_color = self.color\r\n pygame.draw.rect(screen, used_color,\r\n (self.location_top_left[0], self.location_top_left[1], self.size_x, self.size_y), 0)", "def draw_a(self):\r\n pen.down()\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(20)\r\n pen.right(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.up()\r\n pen.left(90)\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.forward(50)", "def UpdateButtonOnScreen(self, button_ui_part, event):\r\n\r\n hit_test = self.HitTest(*event.GetPosition())\r\n\r\n if not hit_test or not button_ui_part:\r\n return\r\n \r\n state = AUI_BUTTON_STATE_NORMAL\r\n \r\n if hit_test == button_ui_part:\r\n if event.LeftDown():\r\n state = AUI_BUTTON_STATE_PRESSED\r\n else:\r\n state = AUI_BUTTON_STATE_HOVER\r\n else:\r\n if event.LeftDown():\r\n state = AUI_BUTTON_STATE_HOVER\r\n \r\n # now repaint the button with hover state\r\n cdc = wx.ClientDC(self._frame)\r\n\r\n # if the frame has a toolbar, the client area\r\n # origin will not be (0,0).\r\n pt = self._frame.GetClientAreaOrigin()\r\n if pt.x != 0 or pt.y != 0:\r\n cdc.SetDeviceOrigin(pt.x, pt.y)\r\n\r\n if hit_test.pane: \r\n self._art.DrawPaneButton(cdc, self._frame,\r\n button_ui_part.button.button_id,\r\n state,\r\n button_ui_part.rect, hit_test.pane)", "def click(self, event):\n x, y = self.canvas.invert([event.x, event.y])\n i, j = int(floor(x)), int(floor(y))\n patch = self.get_cell(i, j)\n if patch and patch.state == \"green\":\n cluster = self.get_cluster(patch)\n self.show_cluster(cluster)", "def expose(self, widget, event):\n cr = widget.window.cairo_create()\n cr.set_source_rgb(0.05, 0.05, 0.05)\n cr.paint()\n for pos in self.next_piece.occupying():\n self.paint_square(tuple_add(pos, (-1, 1)),\n self.next_piece.color, cr)", "def option(self, event):\n pos = self.button_region(event)\n if pos is not None:\n self.result = self.button_labels[pos]\n\n close = self.button_params[pos].get('close', False)\n func = self.button_params[pos].get('func', None)\n if func is not None:\n func()\n if close:\n plt.close()", "def draw_selected(self):\n if self.get_selected() is not None and not self.check_if_locked(self.get_selected()):\n self.color_cell(pos=self.get_selected(\n ), color=SELECTED_INVALID if self.get_selected() in self.invalid else SELECTED)", "def draw(screen):\n MY.restart_button.draw(screen)\n MY.display_text.draw(screen)", "def __createButton(self):\r\n self.button = QPushButton(\"Plot\") # text diplayed on the button\r\n self.button.setShortcut(\"Ctrl+P\") # adding a shortcut \r\n self.button.clicked.connect(self.__onClick) # connect it to the __onClick function\r", "def button_on(self, value: 
int):\n if value is 1:\n self.button_1 = arcade.Sprite(settings.button_pressed, .7, 0, 0, 0,\n 0, 50, 570)\n elif value is 2:\n self.button_2 = arcade.Sprite(settings.button_pressed, .7, 0, 0, 0,\n 0, 50, 75)\n elif value is 3:\n self.button_3 = arcade.Sprite(settings.button_pressed, .7, 0, 0, 0,\n 0, 750, 570)\n elif value is 4:\n self.button_4 = arcade.Sprite(settings.button_pressed, .7, 0, 0, 0,\n 0, 750, 75)", "def OnBitmapButton1StarButton(self, event):\r\n\t\tself._configtmp[\"userrating\"] = 1\r\n\t\tself.SetStars()", "def paintColorPalette(self, method, paletteType, tag): \n baseX = 35\n sizeX = 70\n if paletteType == \"hair\":\n baseY = 515\n else:\n baseY = 510\n sizeY = 45\n offset = 10\n buttons = self.getPaletteButtons(paletteType)\n for i in range(len(buttons)):\n for j in range(len(buttons[0])):\n button = guiobjects.OcempImageButtonTransparent(GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(GG.utils.PATH_EDITOR_INTERFACE, buttons[i][j])))\n button.topleft = [baseX + sizeX * j + offset * j, baseY + sizeY * i + offset * i]\n button.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, method, j + 1 + (i * 3))\n self.window.add_child(button)\n self.activeWidget.append(button)\n self.paintSelectionItem(tag)", "def paintSizePalette(self, method): \n baseX = 70\n baseY = 70\n sizeY = 150\n offset = 10\n buttons = [\"s\", \"m\", \"l\", \"xl\"]\n for i in range(len(buttons)):\n button = guiobjects.OcempImageButtonTransparent(GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(GG.utils.PATH_EDITOR_INTERFACE, buttons[i] + \".png\")), self.buttonTooltips[buttons[i]], self.showTooltip, self.removeTooltip)\n button.topleft = [baseX , baseY + sizeY * i + offset * i]\n button.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, method, buttons[i].upper())\n self.window.add_child(button)\n self.activeWidget.append(button)", "def update_button(self, button: tk.Button):\n if self.game.whose_turn == 0:\n button_text = \"X\"\n else:\n button_text = \"O\"\n button.configure(state='disabled', text=button_text)", "def render_active(self):\n # Rendering button \"background\"\n if self.resize_right:\n self.active_background_surface = pygame.Surface((self.w * 1.05, self.h))\n else:\n self.active_background_surface = pygame.Surface((self.w, self.h))\n self.active_background_surface.set_alpha(self.alpha)\n self.active_background_surface.fill(self.color_bg_active)\n self.screen.blit(self.active_background_surface, (self.x, self.y)) # (0,0) are the top-left coordinates\n # Rendering button text\n self.screen.blit(self.inactive_text_surface, self.inactive_textRect)", "def paint_square(self, pos, color, cr):\n cr.set_source_rgb(*color)\n i, j = pos\n cr.rectangle(i*DOT_SIZE+1, j*DOT_SIZE-1, DOT_SIZE-2, DOT_SIZE-2)\n cr.fill()", "def create_button(self, button_info_tuple):\n \n font1 = pygame.font.Font(None, 26)\n font2 = pygame.font.Font(None, 36)\n\n xpos = button_info_tuple[0]\n ypos = button_info_tuple[1]\n width = button_info_tuple[2]\n height = button_info_tuple[3]\n colour = button_info_tuple[4]\n text = button_info_tuple[5]\n function = button_info_tuple[6]\n amount = button_info_tuple[7]\n highlight = button_info_tuple[8]\n persistence = button_info_tuple[9]\n menu_change = button_info_tuple[10]\n\n rendered_Text = font1.render(text, 1, BLACK)\n\n button_Dict = {'xpos' : xpos,\n 'ypos' : ypos,\n 'width' : width,\n 'height' : height,\n 'colour' : colour,\n 'text' : rendered_Text,\n 'rect' : pygame.Rect((xpos, ypos), (width, height)),\n 'function' : function[1],\n 
'f_index' : function[0],\n 'amount' : amount,\n 'highlight' : highlight,\n 'persistence': persistence,\n 'menuChange' : menu_change,\n 'menu_type' : 'Button'}\n\n self._menu_items.append(button_Dict)", "def button_box(self):\r\n\r\n below_hz_frame = tkinter.Frame(self)\r\n ok_button = ttk.Button(below_hz_frame, text=\"OK\",\r\n width=10, command=self.ok,\r\n default=tkinter.ACTIVE)\r\n ok_button.grid(row=0, column=0, padx=30, pady=10)\r\n cancel_button = ttk.Button(below_hz_frame, text=\"Cancel\", width=10,\r\n command=self.cancel)\r\n cancel_button.grid(row=0, column=1, padx=30, pady=10)\r\n\r\n # bind 'ok' method to the 'enter' button of the keyboard\r\n self.bind(\"<Return>\", self.ok)\r\n\r\n # bind 'cancel' method to the 'esc' button of the keyboard\r\n self.bind(\"<Escape>\", self.cancel)\r\n below_hz_frame.pack(fill=tkinter.X)", "def draw(self):\r\n arcade.draw_circle_filled(self.center.x, self.center.y, BULLET_RADIUS, BULLET_COLOR)", "def __init__(self, x, y, width, height, action = RETURN_TRUE, inactivecolour = red, activecolour = orange,\n text = None, textcolour = black, size = 25, border = None):\n super(Button, self).__init__(x, y, width, height)\n def tempTrue(): return True\n def tempFalse(): return False\n def tempNone(): return None\n self.string = False\n if action == Button.RETURN_TRUE:\n action = tempTrue\n elif action == Button.RETURN_FALSE:\n action = tempFalse\n elif action == Button.RETURN_NONE:\n action = tempNone\n elif isinstance(action, str):\n self.string = True\n self.action = action\n self.inactive = inactivecolour\n self.active = activecolour\n self.border = border \n if text == None:\n text = action.__name__.upper()\n if self.string:\n text = self.action\n self.text = text\n self.textcolour = textcolour\n self.size = size", "def __init__(\r\n self, text=\"Ok\", font=None, size=30, state: int = Button.INACTIVE\r\n ) -> None:\r\n super().__init__(state)\r\n self._font = Font(font, size)\r\n self._text = text\r\n self.rect = Rect(0, 0, 0, 0)\r\n self.redraw()", "def draw(self):\n #for (x, y) in self.coords:\n # pyxel.rect(\n # (x + self.x) * 4,\n # (y + self.y) * 4,\n # (x + self.x) * 4 + 3,\n # (y + self.y) * 4 + 3,\n # self.color)", "def refresh(self, xy):\n button = self.squares[xy]\n\n text, fg, bg = self.text_fg_bg(xy)\n button.config(text=text, fg=fg, bg=bg)\n \n if xy in self.opened:\n button.config(relief=Tk.SUNKEN)\n\n if self.empty_remaining > 0:\n self.message(\"%d non-mines left to open\" %\n self.empty_remaining)", "def button_1(cls, cloud_plot, artist, ind):\n is_parent = cloud_plot.is_parent_artist(artist, ind)\n gen = cloud_plot.artist2gen[artist]\n if is_parent:\n vis_now = FigureControl.isVisible(gen)\n FigureControl.makeGenVisible(gen, not vis_now, \"dist\")\n else:\n row_idx = cloud_plot.artist2data[artist][ind]\n for cpl in gs.cloud_plots:\n this_data = cpl.fetch_child_data_point(gen, row_idx)\n cpl.show_new_labels_dp(this_data)\n FigureControl.draw_all_cloud_plots()\n cloud_plot.button_1(artist, ind)" ]
[ "0.7021328", "0.701689", "0.6932498", "0.68985623", "0.6685321", "0.6384838", "0.6380278", "0.63643676", "0.63429385", "0.6320387", "0.6311006", "0.6310892", "0.6305261", "0.6301066", "0.6284226", "0.6218799", "0.61398274", "0.6080243", "0.60589015", "0.6042735", "0.60369974", "0.6024041", "0.60219705", "0.60160524", "0.59240156", "0.5913872", "0.5904214", "0.5900948", "0.58950895", "0.5892363", "0.5885149", "0.5868026", "0.58634156", "0.5860885", "0.58454895", "0.5834043", "0.5829856", "0.5808966", "0.5806467", "0.5803026", "0.5802447", "0.57970214", "0.5791751", "0.5789562", "0.5785715", "0.5743114", "0.57311624", "0.5726753", "0.5720445", "0.571248", "0.5700529", "0.56836253", "0.56802577", "0.56774217", "0.56440586", "0.5642578", "0.5621394", "0.56144565", "0.5608671", "0.5605859", "0.56033736", "0.5601728", "0.56005126", "0.55985355", "0.55878955", "0.55722964", "0.5571523", "0.55699784", "0.5567681", "0.55569905", "0.5542267", "0.5540996", "0.5540993", "0.5540546", "0.5523677", "0.5523622", "0.5521281", "0.55202574", "0.5517344", "0.55068046", "0.5488921", "0.5478549", "0.54735494", "0.546501", "0.54598737", "0.54492223", "0.54464436", "0.5445064", "0.54435724", "0.5434598", "0.5434523", "0.54274213", "0.5425939", "0.5424142", "0.5419839", "0.54078805", "0.5407739", "0.5407091", "0.5404304", "0.5403167" ]
0.70010066
2
Called whenever the mouse moves.
def on_mouse_motion(self, x, y, delta_x, delta_y): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_mouse_motion(self, x, y, delta_x, delta_y):\r\n pass", "def on_mouse_move(self, event: PointEvent):\n self.x = event.x\n self.y = event.y\n self.handle_mouse(self.x, self.y)", "def mouse_move_callback(self, event):\n # TODO drag and drop figuriek\n print(\"moving at \", event.x + self.offset_x, event.y + self.offset_y)", "def on_mouse_move(self, win, xpos, ypos):\n old = self.mouse\n self.mouse = (xpos, glfw.get_window_size(win)[1] - ypos)\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_LEFT):\n self.drag(old, self.mouse, glfw.get_window_size(win))\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_RIGHT):\n self.pan(old, self.mouse)", "def on_mouse_move(self, win, xpos, ypos):\n old = self.mouse\n self.mouse = (xpos, glfw.get_window_size(win)[1] - ypos)\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_LEFT):\n self.drag(old, self.mouse, glfw.get_window_size(win))\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_RIGHT):\n self.pan(old, self.mouse)", "def ev_mousemotion(self, event: MouseMotion) -> None:", "def update(self):\n self.mousePos = pygame.mouse.get_pos()\n self.update_button_hover_status()", "def mousePosition(self):", "def handle_mouse(self, x, y):\n pass", "def ev_MOUSEMOTION(self, event):", "def update(self):\n\n\t\tself.x = games.mouse.x\n\t\tself.y = games.mouse.y\n\t\tself.check_collide()", "def update(self):\n # Get the current mouse position. This returns the position\n # as a list of two numbers.\n pos = pygame.mouse.get_pos()\n\n # Set the player x position to the mouse x position\n self.rect.x = pos[0]", "def update(self):\n # Get the current mouse position. This returns the position\n # as a list of two numbers.\n pos = pygame.mouse.get_pos()\n \n # Set the player x position to the mouse x position\n self.rect.x = pos[0]", "def on_mouse_move(self, event):\n self.mouse = [event.xdata, event.ydata]\n\n # Update pan view on mouse move\n if self.panning is True:\n for a in self.pan_axes:\n a.drag_pan(1, event.key, event.x, event.y)\n\n # Async re-draw (redraws only on thread idle state, uses timer on backend)\n self.canvas.draw_idle()\n\n ##### Temporary place-holder for cached update #####\n self.update_screen_request.emit([0, 0, 0, 0, 0])", "def update(self):\r\n self.x = games.mouse.x\r\n self.y = games.mouse.y\r\n self.check_collide()", "def update(self):\n self.x = games.mouse.x\n self.y = games.mouse.y\n self.check_collide()", "def handle_mouse(obj, event):\n if event:\n x = event.globalX()\n y = event.globalY()\n x_w = obj.offset.x()\n y_w = obj.offset.y()\n obj.move(x - x_w, y - y_w)", "def on_mouse_move(self, event):\n if event.is_dragging and event.buttons[0] == 1:\n x0, y0 = event.last_event.pos[0], event.last_event.pos[1]\n x1, y1 = event.pos[0], event.pos[1]\n X0, Y0, Z0 = self.pixel_to_coords(float(x0), float(y0))\n X1, Y1, Z1 = self.pixel_to_coords(float(x1), float(y1))\n self.translate_center(X1 - X0, Y1 - Y0, Z1 - Z0)", "def _motion(self, event):\n if self.current:\n # modify the current line by changing the end coordinates\n # to be the current mouse position\n coords = event.widget.coords(self.current)\n coords[2] = event.x\n coords[3] = event.y\n\n event.widget.coords(self.current, *coords)", "def update(self):\n self.x = games.mouse.x\n #self.y = games.mouse.y\n self.check_collide()", "def OnMouseMotion(self, evt):\n if evt.Dragging() and evt.LeftIsDown():\n self.lastx, self.lasty = self.x, self.y\n self.x, self.y = evt.GetPosition()\n self.Refresh(False)", "def mouseDragged():\n if mousePressed:\n mousePressed()", "def update(self):\r\n 
self.x = 60\r\n self.y = games.mouse.y\r\n self.check_collide()", "def follow(self):\n\t\tpos = pygame.mouse.get_pos()\n\t\tself.x = pos[0]\n\t\tself.y = pos[1]\n\t\tself.draw()", "def on_mouse_motion(self, x, y, delta_x, delta_y):\n\n print(x)\n print(y)\n print(delta_x)\n print(delta_y)\n\n\n #self.manage_crosshair()\n \n \n\n #self.crosshair_sprite.center_x += delta_x\n #self.crosshair_sprite.center_y += delta_y\n\n\n self.crosshair_relative_xoffset += delta_x\n self.crosshair_relative_yoffset += delta_y", "def mouseDragged(self, point, delta):\n pass", "def mouse_move(self, pos):\n if (self.setup_type == \"position\"):\n x, y = pos\n self.canvas.move(x, y)", "def mouse_position_event(self, x: int, y: int):\n pass", "def update(self):\n pos = pygame.mouse.get_pos()\n self.rect.midtop = pos\n if self.punching:\n self.rect.move_ip(5, 10)", "def on_mouse_motion(self, x, y, dx, dy):\n if self.exclusive:\n self.gamestatemanager.peek().on_mouse_motion(x, y, dx, dy)", "def mouseMoveEvent(self, event):\n self.end = event.pos()\n self.update()", "def on_mouse_motion(self, x, y, dx, dy):\n # hazlo aparecer donde este mi jugador en el mouse\n self.player_sprite.center_x = x\n self.player_sprite.center_y = y", "def mouseMoveEvent(self, event):\n if self.view_state.tracking == TrackingMode.FREE and event.buttons() == QtCore.Qt.LeftButton:\n # Calculate the change in mouse position.\n new_mouse_pos = np.array([event.x(), event.y()])\n mouse_delta = new_mouse_pos - self.view_state.mouse\n\n # Add this to the view centre.\n self.view_state.centre = self.view_state.centre - mouse_delta * (1 / self.view_state.scale)\n self.view_state.mouse = new_mouse_pos", "def update(self):\n pos = pygame.mouse.get_pos()\n self.rect.midtop = pos\n if self.punching:\n self.rect.move_ip(5, 10) # move fist position in place", "def on_mouse_motion(self, x: float, y: float, dx: float, dy: float):\n if self.player_enabled:\n super().on_mouse_motion(x, y, dx, dy)", "def _onmove(self, event):", "def onMouseMove(self,mouseEvent):\n\t\tself.canvas.drawEdgeTo(mouseEvent.x,mouseEvent.y)", "def _on_canvas_mouse(self, event):\n if event.GetEventType() in [wx.wxEVT_MOTION, wx.wxEVT_LEFT_DOWN, \n wx.wxEVT_LEFT_UP, wx.wxEVT_MOTION|wx.wxEVT_LEFT_DOWN]:\n new_event = wx.MouseEvent(event.GetEventType())\n pos = self.tc.ScreenToClient(wx.GetMousePosition())\n new_event.SetPosition(pos)\n new_event.Skip()\n self.tc.GetEventHandler().ProcessEvent(new_event)", "def grab(self, event):\n self.ypos = event.y\n self.xpos = event.x\n self.config(cursor='fleur')", "def handle_mouse(self, x, y):\n self.x = x\n self.y = y\n global _pending_handle_mouse\n if not _pending_handle_mouse:\n _pending_handle_mouse = True\n if self.fig.document is not None:\n self.fig.document.add_timeout_callback(self.handle_mouse_callback, 100)\n else:\n self.handle_mouse_callback()", "def OnMouse(self, event):\n\n self.Refresh()\n event.Skip()", "def on_mouse_movement(self, event: wx.MouseEvent) -> None:\n if not event.Dragging():\n self._drag_start_pos = None\n return\n # self.CaptureMouse()\n if self._drag_start_pos is None:\n self._drag_start_pos = event.GetPosition()\n else:\n current_pos = event.GetPosition()\n change = self._drag_start_pos - current_pos\n self.SetPosition(self.GetPosition() - change)", "def on_mouse_movement(self, event: wx.MouseEvent) -> None:\n if not event.Dragging():\n self._drag_start_pos = None\n return\n # self.CaptureMouse()\n if self._drag_start_pos is None:\n self._drag_start_pos = event.GetPosition()\n else:\n current_pos = 
event.GetPosition()\n change = self._drag_start_pos - current_pos\n self.SetPosition(self.GetPosition() - change)", "def mouse_left_up(self):\n pass", "def update(self):\r\n # Get where the mouse is\r\n pos = pygame.mouse.get_pos()\r\n # Set the left side of the player bar to the mouse position\r\n self.rect.x = pos[0]\r\n # Make sure we don't push the player paddle\r\n # off the right side of the screen\r\n if self.rect.x > self.screenwidth - self.width:\r\n self.rect.x = self.screenwidth - self.width", "def _updateOnMouseState(self, state):\n x = state.X.abs\n y = state.Y.abs\n \n mscale = self.mouse_icon.getScale() \n \n if (x + mscale[0] + self.mouse_offset) > render_engine.Window.width:\n x = x - mscale[0] - 10\n else:\n x += self.mouse_offset\n \n if (y + mscale[1] + self.mouse_offset) > render_engine.Window.height:\n y = y - mscale[1] - 10\n else:\n y += self.mouse_offset\n \n self.mouse_icon.setPosition((x, y))", "def on_mouse_motion(x, y, dx, dy):\n if in_box(x, y):\n # Change the cursor if inside the box.\n self.window.set_mouse_cursor(self.hand_cursor)\n else:\n self.window.set_mouse_cursor(self.default_cursor)", "def __mouseMoved(self, x, y):\n # Are we on the bounding box?\n if pointOnBox(x, y, self.currentBox, thickness=self.__THICKNESS):\n position = getCursorPosition(x, y, self.currentBox, thickness=self.__THICKNESS)\n cursor = [\n wx.CURSOR_SIZENWSE,\n wx.CURSOR_SIZENS,\n wx.CURSOR_SIZENESW,\n wx.CURSOR_SIZEWE,\n wx.CURSOR_SIZENWSE,\n wx.CURSOR_SIZENS,\n wx.CURSOR_SIZENESW,\n wx.CURSOR_SIZEWE\n ] [position]\n self.__setCursor(cursor)\n elif pointInBox(x, y, self.currentBox):\n self.__setCursor(wx.CURSOR_HAND)\n else:\n self.__setCursor()", "def mouseMoveEvent(self, event):\n # super(PlotWidget, self).mouseMoveEvent(event)\n event.accept()", "def update(self):\n # Get where the mouse is\n pos = pygame.mouse.get_pos()\n # Set the left side of the player bar to the mouse position\n self.rect.x = pos[0]\n # Make sure we don't push the player paddle \n # off the right side of the screen\n if self.rect.x > self.screenwidth - self.width:\n self.rect.x = self.screenwidth - self.width", "def handle_mouse_press(self, event):", "def mouse_motion_handler(self, event):\r\n\r\n self.reset_button.mouse_motion_handler(event.pos)\r\n\r\n if not self.is_game_over:\r\n tile = self.board.get_event_tile(event.pos)\r\n self.board.update_tile_hover(tile, self.is_left_mouse_down, self.is_right_mouse_down)\r\n self.update_reset_button()", "def move_mouse(self, pos):\n dx, dy = self.distance_from_crosshairs(pos[0], pos[1])\n pag.move(dx, dy)", "def ev_MOUSEUP(self, event):", "def mouse_enter(self):\n pass", "def mousePressEvent(self, ev):\n super(PlotObject, self).mousePressEvent(ev)\n self._downpos = self.mousePos", "def mouseMoveEvent (self, event):\n self.itemMoved = True\n super(DiagramItem, self).mouseMoveEvent(event)", "def mousePressEvent(self, event):\n self.begin = event.pos()\n self.end = event.pos()\n self.update()", "def follow_mouse(self, mouse):\n half_width = self.width() / 2\n self.left = mouse.get_x() - half_width\n self.right = mouse.get_x() + half_width", "def normal_mouse_move(self, event):\n plot = self.component\n if plot is not None:\n if isinstance(plot, BaseXYPlot):\n ndx = plot.map_index((event.x, event.y), index_only = True)\n x = plot.index.get_data()[ndx]\n y = plot.value.get_data()[ndx]\n print self.format % (x,y)\n else:\n print \"dataprinter: don't know how to handle plots of type\",\n print plot.__class__.__name__\n return", "def ev_windowmoved(self, event: 
WindowMoved) -> None:", "def mouseMoveEvent(self, event):\n if self.line:\n self.line.setLine(QLineF(self.line.line().p1(), event.scenePos()))\n\n QGraphicsScene.mouseMoveEvent(self, event)\n self.update()", "def update(self):\n pygame.event.pump()\n self.pos_x -= 1.5", "def handle_event(self, event):\n if event.type != MOUSEMOTION:\n return\n self.model.slider.left = event.pos[0]", "def movement(self):", "def mousePositionRaw(self):", "def mousePositionRaw(self):", "def moveEvent(self, *args, **kwargs):\n self.windowMoved.emit()", "def ev_mousemotion(self, event: tcod.event.MouseMotion) -> T | None:", "def move( self, event ):\n self.lastMotion = time()\n if self.follow == False: # If the follow flag is not set, motion within the widget will make the ToolTip dissapear\n self.withdraw()\n self.visible = 1\n self.geometry( '+%i+%i' % ( event.x_root+10, event.y_root+10 ) ) # Offset the ToolTip 10x10 pixes southwest of the pointer\n try:\n self.msgVar.set( self.msgFunc() ) # Try to call the message function. Will not change the message if the message function is None or the message function fails\n except:\n pass\n self.after( int( self.delay * 1000 ), self.show )", "def move(self, event):\r\n self.lastMotion = time()\r\n # If the follow flag is not set, motion within the\r\n # widget will make the ToolTip disappear\r\n #\r\n if self.follow is False:\r\n self.withdraw()\r\n self.visible = 1\r\n\r\n # Offset the ToolTip 10x10 pixes southwest of the pointer\r\n self.geometry('+%i+%i' % (event.x_root+20, event.y_root-10))\r\n try:\r\n # Try to call the message function. Will not change\r\n # the message if the message function is None or\r\n # the message function fails\r\n self.msgVar.set(self.msgFunc())\r\n except:\r\n pass\r\n self.after(int(self.delay * 1000), self.show)", "def move(self, event):\r\n self.lastMotion = time()\r\n # If the follow flag is not set, motion within the\r\n # widget will make the ToolTip disappear\r\n #\r\n if self.follow is False:\r\n self.withdraw()\r\n self.visible = 1\r\n\r\n # Offset the ToolTip 10x10 pixes southwest of the pointer\r\n self.geometry('+%i+%i' % (event.x_root+20, event.y_root-10))\r\n try:\r\n # Try to call the message function. 
Will not change\r\n # the message if the message function is None or\r\n # the message function fails\r\n self.msgVar.set(self.msgFunc())\r\n except:\r\n pass\r\n self.after(int(self.delay * 1000), self.show)", "def emitMouseMoveEvent(self, location, currentKbKey, draggedItems, items):\n # emit the mouseMoveEvent signal\n self.mouseMove.emit(self, location, currentKbKey, draggedItems, items)", "def update(self):\r\n self.x = games.mouse.x\r\n self.y = games.mouse.y\r\n self.check_collide() # New for Rev2.0\r\n \r\n if self.left <0:\r\n self.left = 0\r\n \r\n if self.right > games.screen.width:\r\n self.right = games.screen.width\r\n \r\n self.check_collide()", "def move_start(event):\n nonlocal x, y\n x = event.x \n y = event.y\n window['cursor'] = utils.CURSORS['move_item']", "def on_mouse_motion(self, x: float, y: float, dx: float, dy: float):\n self.mouse_x = x\n self.mouse_y = y\n \n # set the rifle angle in degrees\n self.rifle.angle = self._get_angle_degrees(x, y)", "def mouseMoveEvent(self, event: 'QGraphicsSceneMouseEvent'):\n new_cursor_position = event.scenePos() # mouse cursor in scene coordinates\n old_cursor_position = event.lastScenePos()\n offset_x = new_cursor_position.x() - old_cursor_position.x()\n offset_y = new_cursor_position.y() - old_cursor_position.y()\n if self.move_all is False:\n \"\"\"Update single disk\"\"\"\n old_top_left_corner = self.scenePos()\n new_top_left_corner_x = offset_x + old_top_left_corner.x()\n new_top_left_corner_y = offset_y + old_top_left_corner.y()\n self.setPos(QPointF(new_top_left_corner_x, new_top_left_corner_y)) # update disk top left corner\n else:\n \"\"\"Call parent to update everybody\"\"\"\n self.parentItem().move_everybody(offset_x, offset_y)", "def move(self, event):\n self.lastMotion = time()\n # If the follow flag is not set, motion within the\n # widget will make the ToolTip disappear\n #\n if self.follow is False:\n self.withdraw()\n self.visible = 1\n\n # Offset the ToolTip 10x10 pixes southwest of the pointer\n self.geometry('+%i+%i' % (event.x_root+20, event.y_root-10))\n try:\n # Try to call the message function. 
Will not change\n # the message if the message function is None or\n # the message function fails\n self.msgVar.set(self.msgFunc())\n except:\n pass\n self.after(int(self.delay * 1000), self.show)", "def callback_handle_left_mouse_motion(self, event):\n\n # TODO: update this for the case where there is no current shape id\n vector_object = self.get_vector_object(self.variables.current_shape_id)\n if self.variables.active_tool == TOOLS.PAN_TOOL:\n x_dist = event.x - self.variables.tmp_anchor_point[0]\n y_dist = event.y - self.variables.tmp_anchor_point[1]\n self.move(self.variables.image_id, x_dist, y_dist)\n self.variables.tmp_anchor_point = event.x, event.y\n elif self.variables.active_tool == TOOLS.TRANSLATE_SHAPE_TOOL:\n x_dist = event.x - self.variables.tmp_anchor_point[0]\n y_dist = event.y - self.variables.tmp_anchor_point[1]\n t_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)\n new_coords = numpy.asarray(t_coords) + x_dist\n new_coords_y = numpy.asarray(t_coords) + y_dist\n new_coords[1::2] = new_coords_y[1::2]\n if vector_object.image_drag_limits:\n canvas_limits = self.image_coords_to_canvas_coords(vector_object.image_drag_limits)\n x_vertices = new_coords[0::2]\n y_vertices = new_coords[1::2]\n within_x_limits = True\n within_y_limits = True\n for x_vertex in x_vertices:\n if canvas_limits[2] < x_vertex or x_vertex < canvas_limits[0]:\n within_x_limits = False\n for y_vertex in y_vertices:\n if y_vertex < canvas_limits[1] or y_vertex > canvas_limits[3]:\n within_y_limits = False\n if not within_x_limits:\n new_coords[0::2] = t_coords[0::2]\n if not within_y_limits:\n new_coords[1::2] = t_coords[1::2]\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id,\n new_coords,\n update_pixel_coords=True)\n self.variables.tmp_anchor_point = event.x, event.y\n elif self.variables.active_tool == TOOLS.EDIT_SHAPE_COORDS_TOOL:\n previous_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)\n coord_x_index = self.variables.tmp_closest_coord_index*2\n coord_y_index = coord_x_index + 1\n new_coords = list(previous_coords)\n new_coords[coord_x_index] = event.x\n new_coords[coord_y_index] = event.y\n if vector_object.image_drag_limits:\n drag_x_lim_1, drag_y_lim_1, drag_x_lim_2, drag_y_lim_2 = \\\n self.image_coords_to_canvas_coords(vector_object.image_drag_limits)\n if new_coords[coord_x_index] < drag_x_lim_1:\n new_coords[coord_x_index] = drag_x_lim_1\n if new_coords[coord_x_index] > drag_x_lim_2:\n new_coords[coord_x_index] = drag_x_lim_2\n if new_coords[coord_y_index] < drag_y_lim_1:\n new_coords[coord_y_index] = drag_y_lim_1\n if new_coords[coord_y_index] > drag_y_lim_2:\n new_coords[coord_y_index] = drag_y_lim_2\n\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, tuple(new_coords))\n elif self.variables.active_tool == TOOLS.ZOOM_IN_TOOL:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.ZOOM_OUT_TOOL:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.SELECT_TOOL:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_DRAGGING:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_ELLIPSE_BY_DRAGGING:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_LINE_BY_DRAGGING:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_DRAGGING:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_POINT_BY_CLICKING:\n 
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (event.x, event.y))", "def _on_mouse(self, event):\n x, y = event.GetPosition()\n if self._drag_mode == DepthCanvas.SASH_DRAG_NONE: \n self._canvas_hit_test(x, y) \n if event.LeftDown():\n self.start_dragging(y)\n elif self._drag_mode == DepthCanvas.SASH_DRAG_DRAGGING:\n if event.LeftIsDown():\n self.drag_it(y) \n elif event.LeftUp():\n self.end_dragging()\n event.Skip()", "def _onMotionNotify(self, widget, event):\n\t\tif self.fullscreenToggle:\n\t\t\tmove = [event.x - self.mouseStart[0], event.y - self.mouseStart[1]]\n\t\t\tnewPos = [self.imgPosStart[0] - move[0], self.imgPosStart[1] - move[1]]\n\t\t\tself.moveImage(newPos[0], newPos[1])", "def onMove(self, event):\n\t\tif (event.xdata != None and event.ydata != None and event.xdata != self.xdata and event.ydata != self.ydata):\n\n\t\t\tself.xdata = event.xdata\n\t\t\tself.ydata = event.ydata\n\n\t\t\tfor loop in range(4):\n\t\t\t\tself.stokesFig.canvas.restore_region(self.background[loop])\n\t\t\t\tself.obsStokes[loop].set_ydata(self.stokes[loop][event.ydata, event.xdata, :])\n\t\t\t\tself.axStokes[loop].draw_artist(self.obsStokes[loop])\n\t\t\t\tself.axStokes[loop].draw_artist(self.axStokes[loop].get_yaxis())\n\t\t\t\tself.stokesFig.canvas.blit(self.axStokes[loop].bbox.expanded(1.4, 1.1))", "def mouseMoveEvent(self, e):\r\n \r\n self.label.setText('mouseMoveEvent')", "def moveCursor(self):\n\n\t\tself._before = self.rect.center\n\t\tself.rect.center = self._pos", "def handle_pygame_event(self, event):\n if event.type != MOUSEMOTION:\n # nothing to do\n return\n self.model.paddle.x = event.pos[0]-self.model.paddle.width/2.0", "def on_dragg(self, event):\n if str(event.lastevent.button) == \"MouseButton.LEFT\":\n mX = event.xdata\n mY = event.ydata\n if mX and mY:\n if self.current_point is not None:\n self.x[self.current_point] = mX\n self.y[self.current_point] = mY\n self.redraw()", "def move(self):\n self.val = (pygame.mouse.get_pos()[\n 0] - self.xpos - 10) / 80 * (self.maxi - self.mini) + self.mini\n if self.val < self.mini:\n self.val = self.mini\n if self.val > self.maxi:\n self.val = self.maxi", "def motion(self, event):\n dx = event.x - self.dragx\n dy = event.y - self.dragy\n\n self.dragx = event.x\n self.dragy = event.y\n\n self.canvas.move(self.tags, dx, dy)\n self.diag.update_arrows()", "def mouseMoveEvent(self, event):\n if self.mousenode is not None:\n self.mousenode.setPos(event.scenePos())\n return QtGui.QGraphicsScene.mouseMoveEvent(self, event)", "def OnIdle(self, event):\r\n\r\n if self._moving: \r\n if _VERSION_STRING < \"2.9\":\r\n leftDown = wx.GetMouseState().LeftDown()\r\n else:\r\n leftDown = wx.GetMouseState().LeftIsDown()\r\n\r\n if not leftDown:\r\n self._moving = False\r\n self.OnMoveFinished()\r\n else: \r\n event.RequestMore()", "def handle_motion(self, x, y):\n if self.pressed_flag:\n self.last_point = (x, y)\n\n # trigger canvas to redraw itself\n self.redraw()", "def mouseMoveEvent(self, mouseEvent):\n QGraphicsScene.mouseMoveEvent(self, mouseEvent)\n if not mouseEvent.isAccepted() and mouseEvent.buttons() == Qt.LeftButton:\n delta = mouseEvent.lastScreenPos() - mouseEvent.screenPos()\n self.translate(delta.x(), delta.y())", "def mouse_move(self, obj, event):\n last_pos = self.iren.GetLastEventPosition()\n next_pos = self.iren.GetEventPosition()\n last_disp_coords = np.asarray([last_pos[0], last_pos[1], 0])\n next_disp_coords = np.asarray([next_pos[0], next_pos[1], 0])\n last_world_coords = 
self.display_to_world(last_disp_coords)\n next_world_coords = self.display_to_world(next_disp_coords)\n world_direction = (last_world_coords - next_world_coords)[0]\n\n if world_direction > 0:\n direction = 'forwards'\n elif world_direction < 0:\n direction = 'backwards'\n else:\n direction = 'none'\n\n if self.cone_dir == 'start':\n if direction == 'backwards':\n self.start_base_x += .5\n if self.start_base_x.is_integer():\n ind = str(int(self.start_base_x))\n isvalid = self.gaps.set_dragged_start(ind)\n if isvalid:\n self.ren_win.Render()\n else:\n self.start_base_x -= .5\n return\n\n elif direction == 'forwards':\n if self.start_base_x > 0:\n self.start_base_x -= .5\n if self.start_base_x.is_integer():\n ind = str(int(self.start_base_x))\n self.gaps.set_dragged_start(ind)\n self.ren_win.Render()\n\n if self.cone_dir == 'end':\n if direction == 'backwards':\n if self.end_base_x > 0:\n self.end_base_x -= .5\n if self.end_base_x.is_integer():\n ind = str(int(self.end_base_x))\n self.gaps.set_dragged_end(ind)\n self.ren_win.Render()\n\n elif direction == 'forwards':\n self.end_base_x += .5\n if self.end_base_x.is_integer():\n ind = str(int(self.end_base_x))\n isvalid = self.gaps.set_dragged_end(ind)\n if isvalid:\n self.ren_win.Render()\n else:\n self.end_base_x -= .5\n return", "def mouseMoveEvent(self, e):\n if e.pos().y() == self.offset:\n return\n adder = (self.offset - e.y())\n self.deltacount += adder\n #adder *= self.accelerator\n adder *= (abs(adder) * 0.01)\n #self._state[0] = max(self._min[0], min(self._max[0], self._state[0] + adder))\n QtGui.qApp.emit( QtCore.SIGNAL(\"deltaChanged\"), self, adder)\n #self._param.update()\n QtGui.QCursor.setPos(self.origo)", "def OnMouseDown(self, evt):\n self.CaptureMouse()\n self.x, self.y = self.lastx, self.lasty = evt.GetPosition()", "def OnMouse(self, event):\n if not event.Dragging():\n self._dragPos = None\n if self.HasCapture():\n self.ReleaseMouse()\n return\n else:\n if not self.HasCapture():\n self.CaptureMouse()\n\n if not self._dragPos:\n self._dragPos = event.GetPosition()\n else:\n pos = event.GetPosition()\n displacement = self._dragPos - pos\n self.SetPosition(self.GetPosition() - displacement)", "def update(self, mouse_pos):\n if self.blocked:\n return\n if hasattr(self, 'collide_rect'):\n rect = self.collide_rect\n else:\n rect = self.rect\n hover = rect.collidepoint(mouse_pos)\n if hover:\n self.image = self.hover_image\n else:\n self.image = self.idle_image\n self.hover = hover", "def setupEventHooks(self):\n # handle mouse clicks\n self.img.scene().sigMouseClicked.connect(self.handleClick)\n # handle mouse movement\n # Use signalproxy for ratelimiting\n sig = self.img.scene().sigMouseMoved\n self.mvProxy = pqg.SignalProxy(signal=sig, rateLimit=60, slot=self.handleMove)", "def handle_mousemotion(self, change):\r\n if widget.Widget.handle_mousemotion(self, change):\r\n app.App.handle_mousemotion(self, change)\r\n return True\r\n return False", "def mouseMoveEvent(self, event):\n if self._ignore_mouse_events:\n event.ignore()\n return\n\n event.accept()\n\n if self._selection_mode != SelectionMode.NONE:\n x = event.x()\n y = event.y()\n xdiff = float(x - self._selection_position_start[0])\n ydiff = float(y - self._selection_position_start[1])\n if abs(xdiff) < 0.0001:\n xdiff = 1\n if abs(ydiff) < 0.0001:\n ydiff = 1\n xoff = float(self._selection_position_start[0]) / xdiff + 0.5\n yoff = float(self._selection_position_start[1]) / ydiff + 0.5\n self._addUpdateSelectionBox(xdiff, ydiff, xoff, yoff)\n\n elif 
self._use_zinc_mouse_event_handling:\n scene_input = self._sceneviewer.createSceneviewerinput()\n scene_input.setPosition(event.x(), event.y())\n scene_input.setEventType(Sceneviewerinput.EVENT_TYPE_MOTION_NOTIFY)\n if event.type() == QtCore.QEvent.Leave:\n scene_input.setPosition(-1, -1)\n self._sceneviewer.processSceneviewerinput(scene_input)" ]
[ "0.8003781", "0.7950266", "0.7752839", "0.76657957", "0.76657957", "0.7528036", "0.7506672", "0.7437439", "0.7398488", "0.73642653", "0.73512393", "0.7318702", "0.73072326", "0.7295303", "0.72917354", "0.727809", "0.71965337", "0.7163269", "0.716054", "0.7160027", "0.7155508", "0.7129977", "0.71299565", "0.7100384", "0.7091292", "0.70608443", "0.7016568", "0.70114195", "0.6992546", "0.6986753", "0.69756204", "0.6946765", "0.6938703", "0.6934148", "0.69246304", "0.68962455", "0.6865613", "0.68613774", "0.6861051", "0.6838211", "0.6836769", "0.6820996", "0.6820996", "0.68156254", "0.68047917", "0.67945147", "0.67839247", "0.6768902", "0.6754158", "0.6711137", "0.66746265", "0.6666924", "0.66624016", "0.6651431", "0.6638144", "0.6637502", "0.66192997", "0.66126186", "0.6607154", "0.65825886", "0.658024", "0.65729594", "0.65529233", "0.6552074", "0.6534406", "0.65296614", "0.65296614", "0.65232855", "0.65136355", "0.6509608", "0.6499632", "0.6499632", "0.64821035", "0.64765006", "0.64757055", "0.64753795", "0.6474107", "0.6466792", "0.6461259", "0.64588445", "0.64501673", "0.64415634", "0.6439109", "0.64219224", "0.6418044", "0.6410835", "0.6403517", "0.63957036", "0.63935435", "0.6380583", "0.63745755", "0.6370352", "0.6361941", "0.6358209", "0.6347163", "0.634467", "0.6340359", "0.6338638", "0.6337406", "0.6336221" ]
0.77163106
3
Called when the user presses a mouse button.
def on_mouse_press(self, x, y, button, modifiers): menu: Menu = self.get_menu_for_display() menu_click_x, menu_click_y = self.get_menu_click(menu, x, y) if button == arcade.MOUSE_BUTTON_LEFT: if menu: menu.button_list.check_mouse_press_for_buttons( menu_click_x, menu_click_y, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_mouse_press(self, x, y, button):\n\n pass", "def handle_mouse_press(self, event):", "def on_mouse_press(self, x, y, button, key_modifiers):\r\n pass", "def mouse_press_event(self, x: int, y: int, button: int):\n pass", "def ev_mousebuttondown(self, event: MouseButtonDown) -> None:", "def input(self, event: pygame.event) -> None:\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n self.user_clicked = True", "def on_mouse_release(self, x, y, button):\n pass", "def ev_mousebuttondown(self, event: tcod.event.MouseButtonDown) -> T | None:", "def ev_mousebuttonup(self, event: MouseButtonUp) -> None:", "def on_mouse_release(self, x, y, button, key_modifiers):\r\n pass", "def mouseDragged():\n if mousePressed:\n mousePressed()", "def ev_MOUSEUP(self, event):", "def handle_mouse(self, x, y):\n pass", "def _press(self, event):", "def ev_mousebuttonup(self, event: tcod.event.MouseButtonUp) -> T | None:", "def ev_MOUSEDOWN(self, event):", "def on_mouse_press(self, x, y, button, modifiers):\n\n self.gamestatemanager.peek().on_mouse_press(x, y, button, modifiers)\n\n if self.exclusive:\n self.gamestatemanager.peek().on_mouse_press(x, y, button, modifiers)\n else:\n self.set_exclusive_mouse(True)", "def _on_pyglet_mouse_click(self, x, y, button, modifiers):\n button_time = clock()\n this_button = self._button_names[button]\n self._mouse_buffer.append((this_button, x, y, button_time))", "def handle_mouse_click(self, button: Button) -> None:\n if button.name == 'BACK':\n self._clear_all_input()\n self.current_page -= 1\n self._focused_button = None\n if self.current_page == len(self.pages) - 2:\n self.current_page -= 1\n elif button.name == 'Show Graph':\n self._plot_graph()\n elif button.name == 'Multiple Regression':\n self._selection.handle_selection(self.current_page, button.name)\n self.current_page += 2\n self._update_ghg_coefs()\n elif button.tag == 'normal' and self.current_page < len(self.pages) - 2:\n self._selection.handle_selection(self.current_page, button.name)\n self.current_page += 1\n elif isinstance(button, InputButton):\n self._focused_button = button", "def mousePressEvent(self, mouse_event):\r\n return", "def mouseReleaseEvent(self, event):\n button = event.button()\n\n # select an item on which we clicked\n item = self.itemAt(event.x(), event.y())\n if item:\n self.setCurrentItem(item)\n if button == 1:\n print \"SIMPLE LEFT CLICK\"", "def leftButtonDown(self):\n\t\tautopy.mouse.toggle(True,autopy.mouse.LEFT_BUTTON)", "def HandButton(self, event):\n pass", "def mouse_release_event(self, x: int, y: int, button: int):\n pass", "def mouse_click(self,x,y,button,double_click):\n raise NotImplementedError(\"ERROR: Unimplemented function.\")", "def _onMouseButton(e):\n\n mouse_event = MouseButtonEvent(e)\n orca_state.lastInputEvent = mouse_event\n\n # A mouse button event looks like: mouse:button:1p, where the\n # number is the button number and the 'p' is either 'p' or 'r',\n # meaning pressed or released. 
We only want to stop speech on\n # button presses.\n #\n if mouse_event.pressed:\n speech.stop()", "def on_press(self):\n self.pressed = True", "def on_press(self):\n self.pressed = True", "def on_mouse_press(self, _x, _y, _button, _modifiers):\n self.window.show_view(GameView())", "def on_mouse_click(self, event):\n if not self.is_game_over:\n try:\n # i, j coordinates of the click event\n i = int(round(event.ydata))\n j = int(round(event.xdata))\n\n # Left button\n if event.button == 1 or event.button == 2:\n self.reveal(i, j)\n\n # Right button\n elif event.button == 3:\n self.flag(i, j)\n\n except (TypeError, IndexError):\n pass", "def emitPressEvent(self, clickLocation, button, currentKbKey, items):\n # emit the mousePressEvent signal\n self.mousePress.emit(self, clickLocation, button, currentKbKey, items)", "def LeftClick(self):\n self._PressLeftButton()\n self._ReleaseAllButtons()", "def press(self):\n self.clicked = True\n if self.command:\n self.command(self.name)", "def mousePressEvent(self, event):\n if event.buttons() == QtCore.Qt.LeftButton:\n self.view_state.mouse = np.array([event.x(), event.y()])", "def on_mouse_press(self, x, y, button, modifiers):\n\n # Change states as needed.\n if self.current_state == GAME_RUNNING_PAGE:\n pass\n else:\n # Restart the game.\n self.setup()\n self.score=0\n self.current_state = GAME_RUNNING_PAGE", "def set_mouseclick_handler(self, mouse_handler):\n STmouse.Mouse(self.canvas, '<Button-1>', mouse_handler)", "def __check_if_got_pressed(self):\n mouse_x_pos,mouse_y_pos = pg.mouse.get_pos()\n\n if utilitiez.on_object(self.rect.x, self.rect.y, self.rect.width, self.rect.height, mouse_x_pos, mouse_y_pos,\n MOUSE_WIDTH, MOUSE_HEIGHT):\n self.__on_click()", "def handle_press( self, x, y ):\n self.pressed_flag = True\n self.first_point = (x, y)", "def on_mouse_press(self, _x, _y, _button, _modifiers):\n game_view = GameView()\n self.window.show_view(game_view)", "def on_mouse_press(self, _x, _y, _button, _modifiers):\n game_view = GameView()\n self.window.show_view(game_view)", "def mousePressEvent(self, event):\n #sw = self.spw.windows['Sort']\n buttons = event.buttons()\n if buttons == QtCore.Qt.MiddleButton:\n #sw.on_actionSelectRandomSpikes_triggered()\n #sw.spykewindow.ui.plotButton.click() # same as hitting ENTER in nslist\n self.selecting = True\n self.setMouseTracking(True) # while selecting\n self.selectPointsUnderCursor()\n self.lastPressPos = QtCore.QPoint(event.pos())\n self.lastPos = QtCore.QPoint(event.pos())", "def on_mouse_press(self, event):\n self.on_mouse_wheel(event)", "def on_mouse_press(self, x, y, button, modifiers):\n self.add_wall()", "def mousePressEvent(self, event):\n #sw = self.spw.windows['Sort']\n buttons = event.buttons()\n if buttons == QtCore.Qt.MiddleButton:\n #sw.on_actionSelectRandomSpikes_triggered()\n #sw.spykewindow.plotButton.click() # same as hitting ENTER in nslist\n self.selecting = True\n self.setMouseTracking(True) # while selecting\n self.selectPointsUnderCursor()\n self.lastPressPos = QtCore.QPoint(event.pos())\n self.lastPos = QtCore.QPoint(event.pos())", "def mouse_release(self):\n\n # play button press\n if self.play_button.is_active:\n # change to gameplay\n self.switch_context(game.GameContext)", "def ev_mousebuttondown(self, event):\n if self.engine.game_map.in_bounds(*event.tile):\n if event.button == 1:\n return self.on_index_selected(*event.tile)\n return super().ev_mousebuttondown(event)", "def button_press_event(self, widget, event):\n x, y = event.x, event.y\n\n # x, y = coordinates where 
the button was pressed\n self.last_win_x, self.last_win_y = x, y\n\n button = 0\n # Prepare a button mask with bits set as follows:\n # left button: 0x1\n # middle button: 0x2\n # right button: 0x4\n # Others can be added as appropriate\n self.logger.debug(\"button down event at %dx%d, button=%x\" % (x, y, button))\n\n data_x, data_y = self.check_cursor_location()\n\n return self.make_ui_callback('button-press', button, data_x, data_y)", "def mousePressed(): \n if not game_controller.game_over:\n # Creatr new disk only when there's no disk\n # or there's a disk that stopped falling\n if (not game_controller.falling_disk) or \\\n (game_controller.falling_disk and \\\n game_controller.falling_disk.y_vel == 0):\n game_controller.handle_mousePressed(mouseX, mouseY)", "def eventHandler(self, event: pygame.event):\n # change selected color if this button's rectangle was clicked\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n if self.rect.collidepoint(event.pos): # is mouse over button\n self.image = self._images[ButtonImages.CLICKING_IMAGE.value]\n self.beingClicked = True\n for func, *args in self.functionsToInvokeWhenClicked:\n func(*args)\n elif event.type == pygame.MOUSEBUTTONUP and self.beingClicked:\n if event.button == 1:\n self.beingClicked = False\n self.image = self._images[ButtonImages.DEFAULT_IMAGE.value]", "def mouse_left_down(self):\n pass", "def mousePressed(self, _evt, _id):\n if not self.is_enabled: return False\n \n self.mouse_icon.mousePressed(_evt, _id)\n return False", "def handle_events(self) -> None:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == MOUSEMOTION:\n self.mouse_pos = event.pos\n elif event.type == MOUSEBUTTONDOWN:\n self.mouse_pos = event.pos\n self.mouse_clicked = True\n elif self._focused_button is not None and event.type == KEYDOWN:\n self._handle_key_press(event)", "def mousePressEvent(self, ev):\n super(PlotObject, self).mousePressEvent(ev)\n self._downpos = self.mousePos", "def on_mouse_press(self, _x, _y, _button, _modifiers):\n game_view = GameView()\n game_view.setup()\n self.window.show_view(game_view)", "def leftButtonUp(self):\n\t\tautopy.mouse.toggle(False,autopy.mouse.LEFT_BUTTON)", "def event_handler(self):\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n elif event.type == MOUSEBUTTONDOWN and event.button == LEFT_CLICK:\r\n self.left_mouse_down_handler(event)\r\n elif event.type == MOUSEBUTTONUP and event.button == LEFT_CLICK:\r\n self.left_mouse_up_handler(event)\r\n elif event.type == MOUSEBUTTONDOWN and event.button == RIGHT_CLICK:\r\n self.right_mouse_down_handler(event)\r\n elif event.type == MOUSEBUTTONUP and event.button == RIGHT_CLICK:\r\n self.right_mouse_up_handler(event)\r\n elif event.type == MOUSEMOTION:\r\n self.mouse_motion_handler(event)\r\n elif event.type == MOUSEBUTTONUP and event.button in [2, 4, 5]:\r\n self.shortcut_click(event)", "def on_mouse_up(self, pos, mouse_button):\n for item in button.Button.all_buttons:\n if item.collidepoint(pos):\n self.buttons_clicked.append((item, mouse_button))\n item.on_click(mouse_button)", "def mousePressEvent(self, event):\n self._use_zinc_mouse_event_handling = False # Track when zinc should be handling mouse events\n if self._ignore_mouse_events:\n event.ignore()\n return\n\n event.accept()\n if event.button() not in button_map:\n return\n \n self._selection_position_start = (event.x(), event.y())\n\n if button_map[event.button()] == 
Sceneviewerinput.BUTTON_TYPE_LEFT\\\n and self._selectionKeyPressed and (self._nodeSelectMode or self._elemSelectMode):\n self._selection_mode = SelectionMode.EXCLUSIVE\n if event.modifiers() & QtCore.Qt.SHIFT:\n self._selection_mode = SelectionMode.ADDITIVE\n else:\n scene_input = self._sceneviewer.createSceneviewerinput()\n scene_input.setPosition(event.x(), event.y())\n scene_input.setEventType(Sceneviewerinput.EVENT_TYPE_BUTTON_PRESS)\n scene_input.setButtonType(button_map[event.button()])\n scene_input.setModifierFlags(modifier_map(event.modifiers()))\n self._sceneviewer.processSceneviewerinput(scene_input)\n self._use_zinc_mouse_event_handling = True", "def click(self, mouse_pos):\n for button in self.enabled_buttons(): # type: Button\n if button.is_position_on_button(mouse_pos):\n self.sound.play_sound(self.click_sound)\n button.click()", "def on_mouse_press(self, _x, _y, _button, _modifiers):\n game_view = MainGame()\n game_view.setup()\n self.window.show_view(game_view)", "def on_mouse_press(self, _x, _y, _button, _modifiers):\n game_view = MainGame()\n game_view.setup()\n self.window.show_view(game_view)", "def on_mouse_press(self, _x, _y, _button, _modifiers):\n game_view = MainGame()\n game_view.setup()\n self.window.show_view(game_view)", "def on_mouse_press(self, _x, _y, _button, _modifiers):\n game_view = MainGame()\n game_view.setup()\n self.window.show_view(game_view)", "def _press(self, event):\n # make the drawn box/line visible get the click-coordinates,\n # button, ...\n if self._interactive and self._selection_artist.get_visible():\n self._set_active_handle(event)\n else:\n self._active_handle = None\n\n if ((self._active_handle is None or not self._interactive) and\n self._allow_creation):\n # Clear previous rectangle before drawing new rectangle.\n self.update()\n\n if (self._active_handle is None and not self.ignore_event_outside and\n self._allow_creation):\n x = event.xdata\n y = event.ydata\n self._visible = False\n self.extents = x, x, y, y\n self._visible = True\n else:\n self.set_visible(True)\n\n self._extents_on_press = self.extents\n self._rotation_on_press = self._rotation\n self._set_aspect_ratio_correction()\n\n return False", "def mousePressEvent(self, event): \n if event.type() == qtc.QEvent.MouseButtonPress:\n if event.button() == qtc.Qt.RightButton:\n self.right_click_event()\n\n elif event.button() == qtc.Qt.LeftButton:\n self.left_click_event(event)\n self.mouseStartPosY = event.pos().y()\n self.startValue = self.value()", "def on_mouse_press(self, x, y, button, modifiers):\n\n # Change states as needed.\n if self.current_state == INSTRUCTIONS_PAGE:\n # Next page of instructions.\n self.current_state = GAME_RUNNING\n # Start the game\n self.setup()\n self.current_state = GAME_RUNNING\n elif self.current_state == GAME_OVER:\n # Restart the game.\n self.setup()\n self.current_state = GAME_RUNNING", "def on_left_mouse_click(self, event: Event) -> None:\n\t\tself.mouse_state.set_click(event.x, event.y)", "def OnMouseUp(self, evt):\n self.ReleaseMouse()", "def mouse_left_up(self):\n pass", "def on_key_press(self, event):\n\n #print(\"you pressed {}\".format(event.key))\n key_press_handler(event, self.canvas, self.toolbar)", "def handle_mouse_data(data):\n pass", "def press(button='left', coords=(0, 0)):\n _perform_click_input(button=button, coords=coords, button_down=True, button_up=False)", "def on_mouse_press(self, x: float, y: float, button, modifiers):\n #dialogue buttons\n check_mouse_press_for_buttons(x, y, 
self.levels[self.current_level].dialogue_list)\n\n #room info prompt buttons\n check_mouse_press_for_buttons(x, y, self.levels[self.current_level].room_info_list)", "def button_press_cb(self, source, event):\n\n if event.button == MOUSE_BUTTON_RIGHT:\n pass\n return True\n elif event.button == MOUSE_BUTTON_MIDDLE:\n self.emit('begin-move')\n return True", "def HandlePress(self, event: tkEvent):\n pass", "def click(point):\n m = PyMouse()\n m.move(*point)\n m.press(*point)\n m.release(*point)", "def on_mouse_press(self, x, y, button, key_modifiers):\n self.heldLetter = (arcade.get_sprites_at_point((x, y), self.active_blocks) or [None])[0]", "def onMouseLeftDown(self, event):\n # [NOTE] No need to call self.choice(). It is enough to call\n # event.Skip() and the machine will be called self.OnButtonClick()\n event.Skip()", "def rightButtonDown(self):\n\t\tautopy.mouse.toggle(True,autopy.mouse.RIGHT_BUTTON)", "def on_mouse_click(self, e):\n if 'Control' in e.modifiers:\n # Get mouse position in NDC.\n box_id, _ = self.canvas.stacked.box_map(e.pos)\n channel_id = np.nonzero(self.channel_y_ranks == box_id)[0]\n # Find the spike and cluster closest to the mouse.\n db = self.data_bounds\n # Get the information about the displayed spikes.\n wt = [(t, s, c, ch) for t, s, c, ch in self._waveform_times if channel_id in ch]\n if not wt:\n return\n # Get the time coordinate of the mouse position.\n mouse_pos = self.canvas.panzoom.window_to_ndc(e.pos)\n mouse_time = Range(NDC, db).apply(mouse_pos)[0][0]\n # Get the closest spike id.\n times, spike_ids, spike_clusters, channel_ids = zip(*wt)\n i = np.argmin(np.abs(np.array(times) - mouse_time))\n # Raise the select_spike event.\n spike_id = spike_ids[i]\n cluster_id = spike_clusters[i]\n emit('select_spike', self, channel_id=channel_id,\n spike_id=spike_id, cluster_id=cluster_id)\n\n if 'Shift' in e.modifiers:\n # Get mouse position in NDC.\n box_id, _ = self.canvas.stacked.box_map(e.pos)\n channel_id = int(np.nonzero(self.channel_y_ranks == box_id)[0][0])\n emit('select_channel', self, channel_id=channel_id, button=e.button)", "def _press(self, event):\n # Check for selection of a tool handle.\n if ((self._selection_completed or 'move_vertex' in self._state)\n and len(self._xys) > 0):\n h_idx, h_dist = self._polygon_handles.closest(event.x, event.y)\n if h_dist < self.grab_range:\n self._active_handle_idx = h_idx\n # Save the vertex positions at the time of the press event (needed to\n # support the 'move_all' state modifier).\n self._xys_at_press = self._xys.copy()", "def _on_key_press(self, event):", "def click(self, x, y, button, press):\n\n if self.is_in_screen(x, y) and not self.pause:\n self.get_color(x, y)\n self.record(x, y, button, press)", "def on_mouse_release(self, x, y, button, modifiers):\n \n menu: Menu = self.get_menu_for_display()\n\n menu_click_x, menu_click_y = self.get_menu_click(menu, x, y)\n\n if button == arcade.MOUSE_BUTTON_LEFT:\n if menu:\n menu.button_list.check_mouse_release_for_buttons(\n menu_click_x,\n menu_click_y,\n )", "def clickedAction(self, events):\n print(\"The {} button was clicked!\".format(self.imgname))", "def get_event(self, event):\n if event.type == pg.MOUSEBUTTONDOWN and event.button == 1:\n if self.rect.collidepoint(event.pos):\n self.toggle()", "def pressed(self) -> bool:\n return self.type == \"JOYBUTTONDOWN\"", "def doubleclick(point):\n m = PyMouse()\n m.press(*point)\n m.release(*point)\n m.press(*point)\n m.release(*point)", "def m_press(self, button: MButton):\n pass", "def keyPressEvent(self, 
event):\n self.game_engine.input_manager.keyPressEvent(event)", "def set_pressed(self):\n self._pressed = True", "def key_press_event(self, event):\n pass", "def mouse_right_down(self):\n pass", "def HandleKeyboardInput(self):\n key = yg.getKeyPress()\n if key == \"Return\":\n self.buttons[len(self.buttons) - 1].Click()", "def release(button='left', coords=(0, 0)):\n _perform_click_input(button=button, coords=coords, button_down=False, button_up=True)", "def mousePressEvent(self, event):\n\t\t#print('mousePressEvent() event:', event)\n\t\tif event.button() == QtCore.Qt.RightButton:\n\t\t\tself.showRightClickMenu(event.pos())\n\t\t\tself.mouseReleaseEvent(event)\n\t\telse:\n\t\t\tevent.setAccepted(False)\n\t\t\tsuper().mousePressEvent(event)", "def check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n position = pygame.mouse.get_pos()\n self.left_click(position)", "def on_mouse_press(self, x, y, button, modifiers):\n\n # Change the x/y screen coordinates to grid coordinates\n column = int(x // (WIDTH + MARGIN))\n row = int(y // (HEIGHT + MARGIN))\n\n # print(f\"Click coordinates: ({x}, {y}). Grid coordinates: ({row}, {column})\")\n\n # Make sure we are on-grid. It is possible to click in the upper right\n # corner in the margin and go to a grid location that doesn't exist\n # AKA: make sure you are clicking w/in the grid - TH\n if row < ROW_COUNT and column < COLUMN_COUNT:\n # Flip the location between 1 and 0.\n # this will reset value for the recreate grid\n # and change the color - TH\n # if self.grid[row][column] == 0:\n # self.grid[row][column] = self.num_key\n # else:\n # self.grid[row][column] = 0\n self.current_selected = (row, column)\n\n self.recreate_grid()", "def _press(self, event):\n self._set_cursor(True)\n if self._interactive and self._selection_artist.get_visible():\n self._set_active_handle(event)\n else:\n self._active_handle = None\n\n if self._active_handle is None or not self._interactive:\n # Clear previous rectangle before drawing new rectangle.\n self.update()\n\n v = event.xdata if self.direction == 'horizontal' else event.ydata\n # self._pressv and self._prev are deprecated but we still need to\n # maintain them\n self._pressv = v\n self._prev = self._get_data(event)\n\n if self._active_handle is None and not self.ignore_event_outside:\n # when the press event outside the span, we initially set the\n # visibility to False and extents to (v, v)\n # update will be called when setting the extents\n self._visible = False\n self.extents = v, v\n # We need to set the visibility back, so the span selector will be\n # drawn when necessary (span width > 0)\n self._visible = True\n else:\n self.set_visible(True)\n\n return False", "def on_click(self, x, y):\n self.menu_pointer.on_click(x, y)" ]
[ "0.8571433", "0.8407928", "0.8234308", "0.82193017", "0.8075841", "0.7850791", "0.7829097", "0.7808337", "0.77429193", "0.76497364", "0.74960554", "0.7452911", "0.7409987", "0.7394991", "0.73922163", "0.7388577", "0.73728776", "0.734544", "0.72436446", "0.7235839", "0.7232465", "0.71993625", "0.71872914", "0.71835756", "0.7157547", "0.7089445", "0.7086795", "0.7086795", "0.7062933", "0.7049327", "0.7043442", "0.7021644", "0.70027727", "0.6996707", "0.6992896", "0.69917893", "0.6950744", "0.69468164", "0.69314164", "0.69314164", "0.6915992", "0.69135517", "0.6913389", "0.6912665", "0.6903191", "0.68636715", "0.68437463", "0.682415", "0.6813178", "0.6784379", "0.67797124", "0.67759454", "0.67682576", "0.6766497", "0.67603105", "0.6757321", "0.67287165", "0.67271006", "0.6725989", "0.6715135", "0.6715135", "0.6715135", "0.6715135", "0.6713616", "0.6708276", "0.6705682", "0.6699822", "0.6695097", "0.66578966", "0.6651133", "0.66474986", "0.66454625", "0.6640596", "0.6631628", "0.661678", "0.6608832", "0.66035247", "0.660255", "0.6596424", "0.65782183", "0.65754604", "0.6570608", "0.6560627", "0.65597653", "0.65589184", "0.65524805", "0.6530938", "0.65298176", "0.65296185", "0.6525001", "0.6522821", "0.6512102", "0.6510881", "0.6494226", "0.64903194", "0.6475345", "0.6472338", "0.64703494", "0.6466866", "0.6464044" ]
0.70223546
31
Translate the arcade screen pixel position to coordinate values relative to the menu. Used for determining if a button has been clicked.
def get_menu_click(self, menu, x, y):
    menu_click_x = None
    menu_click_y = None
    if menu:
        menu_center_x, menu_center_y, menu_cords = self.get_menu_coords(menu)
        menu_click_x = menu.width - (SCREEN_WIDTH - x - menu_cords[0][0])
        menu_click_y = menu.height + (SCREEN_HEIGHT - y - menu_cords[0][1])
        # Transform the values for out of bounds values
        if menu_click_x > menu.width or menu_click_x < 0:
            menu_click_x = None
        if menu_click_y > menu.height or menu_click_y < 0:
            menu_click_y = None
    return menu_click_x, menu_click_y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _mouse_action(self, pos, pygame):\r\n surface = pygame.display.get_surface()\r\n\r\n width = surface.get_width()\r\n height = surface.get_height()\r\n # get window size\r\n\r\n button_width = width / 5\r\n button_height = height / 6\r\n # calculate button size\r\n\r\n pixel_x, pixel_y = pos\r\n # get user interact position\r\n\r\n # check which button that user interact\r\n # all the conditional statements deal with what the user selects\r\n # on the screen. There are 25 buttons and hence that many conditional\r\n # statements\r\n if 0 < pixel_x < button_width and\\\r\n button_height < pixel_y < 2 * button_height:\r\n return self.list[0]\r\n elif button_width < pixel_x < 2 * button_width and\\\r\n button_height < pixel_y < 2 * button_height:\r\n return self.list[1]\r\n elif 2 * button_width < pixel_x < 3 * button_width and\\\r\n button_height < pixel_y < 2 * button_height:\r\n return self.list[2]\r\n elif 3 * button_width < pixel_x < 4 * button_width and\\\r\n button_height < pixel_y < 2 * button_height:\r\n return self.list[3]\r\n elif 4 * button_width < pixel_x < 5 * button_width and\\\r\n button_height < pixel_y < 2 * button_height:\r\n return self.list[4]\r\n elif 0 < pixel_x < button_width and\\\r\n 2 * button_height < pixel_y < 3 * button_height:\r\n return self.list[5]\r\n elif button_width < pixel_x < 2 * button_width and\\\r\n 2 * button_height < pixel_y < 3 * button_height:\r\n return self.list[6]\r\n elif 2 * button_width < pixel_x < 3 * button_width and\\\r\n 2 * button_height < pixel_y < 3 * button_height:\r\n return self.list[7]\r\n elif 3 * button_width < pixel_x < 4 * button_width and\\\r\n 2 * button_height < pixel_y < 3 * button_height:\r\n return self.list[8]\r\n elif 4 * button_width < pixel_x < 5 * button_width and\\\r\n 2 * button_height < pixel_y < 3 * button_height:\r\n return self.list[9]\r\n elif 0 < pixel_x < button_width and\\\r\n 3 * button_height < pixel_y < 4 * button_height:\r\n return self.list[10]\r\n elif button_width < pixel_x < 2 * button_width and\\\r\n 3 * button_height < pixel_y < 4 * button_height:\r\n return self.list[11]\r\n elif 2 * button_width < pixel_x < 3 * button_width and\\\r\n 3 * button_height < pixel_y < 4 * button_height:\r\n return self.list[12]\r\n elif 3 * button_width < pixel_x < 4 * button_width and\\\r\n 3 * button_height < pixel_y < 4 * button_height:\r\n return self.list[13]\r\n elif 4 * button_width < pixel_x < 5 * button_width and\\\r\n 3 * button_height < pixel_y < 4 * button_height:\r\n return self.list[14]\r\n elif 0 < pixel_x < button_width and\\\r\n 4 * button_height < pixel_y < 5 * button_height:\r\n return self.list[15]\r\n elif button_width < pixel_x < 2 * button_width and\\\r\n 4 * button_height < pixel_y < 5 * button_height:\r\n return self.list[16]\r\n elif 2 * button_width < pixel_x < 3 * button_width and\\\r\n 4 * button_height < pixel_y < 5 * button_height:\r\n return self.list[17]\r\n elif 3 * button_width < pixel_x < 4 * button_width and\\\r\n 4 * button_height < pixel_y < 5 * button_height:\r\n return self.list[18]\r\n elif 4 * button_width < pixel_x < 5 * button_width and\\\r\n 4 * button_height < pixel_y < 5 * button_height:\r\n return self.list[19]\r\n elif 0 < pixel_x < button_width and\\\r\n 5 * button_height < pixel_y < 6 * button_height:\r\n return self.list[20]\r\n elif button_width < pixel_x < 2 * button_width and\\\r\n 5 * button_height < pixel_y < 6 * button_height:\r\n return self.list[21]\r\n elif 2 * button_width < pixel_x < 3 * button_width and\\\r\n 5 * button_height < pixel_y < 6 * 
button_height:\r\n return self.list[22]\r\n elif 3 * button_width < pixel_x < 4 * button_width and\\\r\n 5 * button_height < pixel_y < 6 * button_height:\r\n return self.list[23]\r\n elif 4 * button_width < pixel_x < 5 * button_width and\\\r\n 5 * button_height < pixel_y < 6 * button_height:\r\n return self.list[24]", "def main_menu_buttons(self):\n button_start_position = self.draw_button(MAIN_MENU_BUTTON['button_start']['image'],\n MAIN_MENU_BUTTON['button_start']['y'])\n button_exit_position = self.draw_button(MAIN_MENU_BUTTON['button_exit']['image'],\n MAIN_MENU_BUTTON['button_exit']['y'])\n\n if button_start_position[0] + button_start_position[2] > self.mouse[0] > button_start_position[0] and \\\n button_start_position[1] + button_start_position[3] > self.mouse[1] > button_start_position[1]:\n\n pygame.mouse.set_cursor(*pygame.cursors.diamond)\n self.draw_button(BUTTON_NAME_FOR_MAIN_MENU[4], MAIN_MENU_BUTTON['button_start']['y'])\n self.draw_button(BUTTON_NAME_FOR_MAIN_MENU[1], MAIN_MENU_BUTTON['button_exit']['y'])\n\n for self.click in pygame.event.get():\n if self.click.type == pygame.MOUSEBUTTONDOWN and self.click.button == 1:\n self.surface.fill((30, 30, 30))\n pygame.mouse.set_cursor(*pygame.cursors.tri_left)\n self.switch_scene = True\n\n elif button_exit_position[0] + button_exit_position[2] > self.mouse[0] > button_exit_position[0] and\\\n button_exit_position[1] + button_exit_position[3] > self.mouse[1] > button_exit_position[1]:\n self.draw_button(BUTTON_NAME_FOR_MAIN_MENU[0], MAIN_MENU_BUTTON['button_start']['y'])\n pygame.mouse.set_cursor(*pygame.cursors.diamond)\n self.draw_button(BUTTON_NAME_FOR_MAIN_MENU[5], MAIN_MENU_BUTTON['button_exit']['y'])\n\n for self.click in pygame.event.get():\n if self.click.type == pygame.MOUSEBUTTONDOWN and self.click.button == 1:\n self.isrunning = False\n\n else:\n # set standard cursor\n pygame.mouse.set_cursor(*pygame.cursors.tri_left)", "def action_to_coords(self, x, y):\n self.scene.center_on(x, y)", "def update(self, screen: pygame.Surface):\n key = pygame.key.get_pressed()\n # User clicks left and position not out of screen\n if key[pygame.K_LEFT] and self.rect.x > 0:\n self.rect.centerx -= 3\n # User clicks right and position not out of screen\n if key[pygame.K_RIGHT] and self.rect.x < S_Width - 50:\n self.rect.centerx += 3\n\n screen.blit(self.game_obj, (self.rect.x, self.rect.y))", "def update(self):\r\n if self.right > games.screen.width or self.left < 0:\r\n self.dx = -self.dx\r\n \r\n if self.bottom > games.screen.height or self.top < 0:\r\n self.dy = -self.dy", "def click_aim(self, pos):\n x, y = pos\n if (self.x - x) ** 2 + (self.y - y) ** 2 <= self.r ** 2:\n self.color = random.choice(COLORS)\n self.x = randint(100, 1000)\n self.y = randint(100, 800)\n self.r = randint(50, 100)\n self.speed_x = randint(-200, 200)\n self.speed_y = randint(-200, 200)\n return True\n else:\n return False", "def update(self):\n \n self.rect.x += self.change_x\n self.rect.y += self.change_y\n \n if self.rect.x < 0:\n self.rect.x = 0\n if self.rect.x > screen_width - 60:\n self.rect.x = screen_width - 60\n if self.rect.y < 0:\n self.rect.y = 0 \n \n if self.rect.y > screen_height - 60:\n self.rect.y = screen_height - 60", "def update(self):\n if self.right > games.screen.width or self.left < 0:\n self.dx = -self.dx\n if self.top < 0 or self.bottom > games.screen.height:\n self.dy = -self.dy", "def translate(self,id,dx,dy):\n if id not in self.elements.keys():\n print(\"Id input not registered! 
Please check your process\")\n return False\n element=self.elements[id]\n state=element.translate(-dy,dx,self.w,self.h)\n if state==True:\n self.canvas=np.ones((self.h,self.w,3),dtype=np.uint8)*255\n self.sync=False\n return state", "def mousePosition(self):", "def clicked(self, x_pos, y_pos):\n img = self.tower_imgs\n if self.x - img.get_width() // 2 + self.width >= x_pos >= self.x - img.get_width() // 2:\n if self.y + self.height - img.get_height() // 2 >= y_pos >= self.y - img.get_height() // 2:\n return True\n return False", "def joy_callback(self, msg):\n mappings = gamepad_mappings.set_gamepad_mappings(msg)\n self.move_vertical = mappings[\"button_vertical\"] # up: +1.0, down: -1.0\n self.move_horizontal = mappings[\"button_horizontal\"] # left: +1.0, right: -1.0", "def __calculate_position(self):\r\n pygame.event.pump() # pygame handlers\r\n keys = pygame.key.get_pressed()\r\n if keys[pygame.K_w]: # key W\r\n self.y -= SPEED\r\n if keys[pygame.K_s]: # key S\r\n self.y += SPEED\r\n if keys[pygame.K_a]: # key A\r\n self.x -= SPEED\r\n if keys[pygame.K_d]: # key D\r\n self.x += SPEED\r\n self.rect = self.image.get_rect()\r\n self.rect.x, self.rect.y = self.x, self.y", "def update(self):\n pygame.event.pump()\n self.pos_x += 0\n if (pygame.key.get_pressed()[pygame.K_w]) and self.pos_y > 0:\n self.pos_y -= 1\n if (pygame.key.get_pressed()[pygame.K_a]) and self.pos_x > 0:\n self.pos_x -= 1\n if (pygame.key.get_pressed()[pygame.K_d]) and self.pos_x < 1080:\n self.pos_x += 1\n if (pygame.key.get_pressed()[pygame.K_s]) and self.pos_y < 360:\n self.pos_y += 1", "def get_aa_pos_on_screen(self,position,frame):\n position=position*3+float(frame)-1\n x,y=self.get_base_pos_on_screen(position)\n y=y+20.0+float(frame)*15.0\n return x,y", "def on_fruit(self):\r\n if self.grid_pos in self.app.fruit:\r\n if int(self.pix_pos.x+TOP_BOTTOM_BUFFER//2) % self.app.cell_width == 0:\r\n if self.direction == vec(1, 0) or self.direction == vec(-1, 0):\r\n return True\r\n # in the x-direction \r\n\r\n if int(self.pix_pos.y+TOP_BOTTOM_BUFFER//2) % self.app.cell_height == 0:\r\n if self.direction == vec(0, 1) or self.direction == vec(0, -1):\r\n return True\r\n # in the y-direction\r\n\r\n return False", "def on_mouse_move(self, event):\n\n # self.view = 1 * np.eye(4, dtype=np.float32)\n # self.model = 1 * np.eye(4, dtype=np.float32)\n\n # self.translate -= event.delta[1]\n # self.translate = max(-1, self.translate)\n # print(event.delta[1])\n # print(self.translate)\n # self.view = translate((0, 0, -self.translate))\n # self.game_program['u_view'] = self.view\n # self.game_program['u_size'] = 5 / self.translate\n # self.view = (0.1*self.translate*np.eye(4, dtype=np.float32)) + self.view\n # self.model = (0.1*self.translate*np.eye(4, dtype=np.float32)) + self.model\n # print(self.view)\n\n # self.game_program['u_model'] = self.model\n # self.game_program['u_view'] = self.view\n\n x, y = event.pos\n #print(x, y)\n self.x_offset, self.y_offset = x - self.last_x, - (y - self.last_y)\n self.last_x, self.last_y = x, y\n self.x_offset *= self.sensitivity\n self.y_offset *= self.sensitivity\n\n self.yaw, self.pitch = self.yaw - self.x_offset, self.pitch + self.y_offset\n self.rot_y(self.yaw * np.pi / 180)\n self.rot_x(self.pitch * np.pi / 180)\n\n self.view = np.dot(self.rot_mat_y, self.rot_mat_x)\n self.game_program['u_view'] = self.view\n\n self.update()", "def onPress(event):\r\n global rect\r\n if event.inaxes == None:\r\n return# Ignore clicks outside the axes\r\n contains, attr = rect.contains(event)\r\n if not 
contains:\r\n return# Ignore clicks outside the rectangle\r\n\r\n global initPos # Grab the global variable to update it\r\n initPos = [rect.get_x(), rect.get_y(), event.xdata, event.ydata]", "def buttonDownScenePos(self, btn=None):\n if btn is None:\n btn = self.button()\n return Point(self._buttonDownScenePos[btn])", "def rectan_button(msg,x,y,w=100,h=100,ic=green,ac=green_bright,action=None,size=20,font='freesansbold.ttf'):\n mouse = pygame.mouse.get_pos() #pobiera pozycje myszki i zwraca x w mouse[0] i y w mouse[1]\n click = pygame.mouse.get_pressed() # click[0] lewy, click[1] srodkowy , click[2] prawy przycisk myszy \n \n #print(mouse)\n a = (x+w > mouse[0] and x < mouse[0] and y+h>mouse[1] and y < mouse[1]) #warunek na to , czy pozycja myszki jest w prostokacie przycisku\n if a: \n pygame.draw.rect(gameDisplay,ac,(x,y,w,h)) #rysuje jasniejszy prostokąt, wydaje sie ze podswietlony, gdy myszka na nim.\n \n if click[0]==1 and action!=None:\n #sleep zeby sie nie wcisnely 2 przyciski jak np. wychodzisz z opcji, a w miejscu przycisku 'back' w glownym menu jest 'start'\n time.sleep(0.1)\n action() \n else:\n pygame.draw.rect(gameDisplay,ic,(x,y,w,h)) #rysuje ciemny prostokat, jesli a nie jest prawdą\n \n\n # tutaj tworzy sie napis na srodku ekranu. \n # mozna dorzucic opcje wyboru \n textfont = pygame.font.Font('freesansbold.ttf',20)\n textsurf,textrect = text_objects(msg,textfont,black)\n textrect.center = ((x+(w/2)),(y+(h/2)))\n gameDisplay.blit(textsurf,textrect)", "def calculate_screen_position(self):\r\n\r\n character_select_start_y = 604\r\n character_select_end_y = 646\r\n\r\n if self.slotNumber <= 6:\r\n start_y = 585 # 595\r\n end_y = 627 # 637\r\n x_hero_number = self.slotNumber\r\n else:\r\n start_y = 300 # 290\r\n end_y = 342 # 332\r\n x_hero_number = self.slotNumber - 6\r\n\r\n start_x = 249 + (x_hero_number * 192)\r\n end_x = 326 + (x_hero_number * 192)\r\n\r\n self.screenPositionCharacterSelect = {\r\n \"start_x\": start_x,\r\n \"end_x\": end_x,\r\n \"start_y\": character_select_start_y,\r\n \"end_y\": character_select_end_y\r\n }\r\n self.screenPositionTab = {\r\n \"start_x\": start_x,\r\n \"end_x\": end_x,\r\n \"start_y\": start_y,\r\n \"end_y\": end_y\r\n }", "def _on_move(self, event):\n\n if not self.button_pressed:\n return\n\n if self.M is None:\n return\n\n x, y = event.xdata, event.ydata\n # In case the mouse is out of bounds.\n if x == None:\n return\n\n dx, dy = x - self.sx, y - self.sy\n x0, x1 = self.get_xlim()\n y0, y1 = self.get_ylim()\n w = (x1-x0)\n h = (y1-y0)\n self.sx, self.sy = x, y\n\n # Rotation\n if self.button_pressed in self._rotate_btn:\n # rotate viewing point\n # get the x and y pixel coords\n if dx == 0 and dy == 0:\n return\n self.elev = art3d.norm_angle(self.elev - (dy/h)*180)\n self.azim = art3d.norm_angle(self.azim - (dx/w)*180)\n self.get_proj()\n self.figure.canvas.draw()\n\n# elif self.button_pressed == 2:\n # pan view\n # project xv,yv,zv -> xw,yw,zw\n # pan\n# pass\n\n # Zoom\n elif self.button_pressed in self._zoom_btn:\n # zoom view\n # hmmm..this needs some help from clipping....\n minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()\n df = 1-((h - dy)/h)\n dx = (maxx-minx)*df\n dy = (maxy-miny)*df\n dz = (maxz-minz)*df\n self.set_xlim3d(minx - dx, maxx + dx)\n self.set_ylim3d(miny - dy, maxy + dy)\n self.set_zlim3d(minz - dz, maxz + dz)\n self.get_proj()\n self.figure.canvas.draw()", "def check_click(self, mouse_x, mouse_y):\r\n # Change the x/y screen coordinates to grid coordinates\r\n column = mouse_x // 70\r\n row = mouse_y // 
70\r\n\r\n if row in [0, 9] or column in [0, 9]:\r\n self.shoot_ray(row, column)\r\n elif 0 < row < 9 and 0 < column < 9:\r\n self.guess_atom(row, column)", "def buttonDownScreenPos(self, btn=None):\n if btn is None:\n btn = self.button()\n return Point(self._buttonDownScreenPos[btn])", "def _updateOnMouseState(self, state):\n x = state.X.abs\n y = state.Y.abs\n \n mscale = self.mouse_icon.getScale() \n \n if (x + mscale[0] + self.mouse_offset) > render_engine.Window.width:\n x = x - mscale[0] - 10\n else:\n x += self.mouse_offset\n \n if (y + mscale[1] + self.mouse_offset) > render_engine.Window.height:\n y = y - mscale[1] - 10\n else:\n y += self.mouse_offset\n \n self.mouse_icon.setPosition((x, y))", "def update_position(self, canvas):\n if self.x <= 0:\n if self.direction == \"SW\":\n self.direction = \"SE\"\n if self.direction == \"W\":\n self.direction = \"E\"\n if self.direction == \"NW\":\n self.direction = \"NE\"\n if self.x >= canvas.width:\n if self.direction == \"SE\":\n self.direction = \"SW\"\n if self.direction == \"E\":\n self.direction = \"W\"\n if self.direction == \"NE\":\n self.direction = \"NW\"\n if self.y <= 0:\n if self.direction == \"NW\":\n self.direction = \"SW\"\n if self.direction == \"N\":\n self.direction = \"S\"\n if self.direction == \"NE\":\n self.direction = \"SE\"\n if self.y >= canvas.height:\n if self.direction == \"SW\":\n self.direction = \"NW\"\n if self.direction == \"S\":\n self.direction = \"N\"\n if self.direction == \"SE\":\n self.direction = \"NE\"\n if self.direction == \"N\":\n self.y -= 1\n if self.direction == \"NE\":\n self.y -= 1\n self.x += 1\n if self.direction == \"E\":\n self.x += 1\n if self.direction == \"SE\":\n self.x += 1\n self.y += 1\n if self.direction == \"S\":\n self.y += 1\n if self.direction == \"SW\":\n self.x -= 1\n self.y += 1\n if self.direction == \"W\":\n self.x -= 1\n if self.direction == \"NW\":\n self.y -= 1\n self.x -= 1", "def int_33H_3(self):\r\n horizontal_position, vertical_position = MainWindow.get_cursor_poisition()\r\n button_status = 1\r\n\r\n self.registers['CX'].set_bytes(horizontal_position, is_int=True)\r\n self.registers['DX'].set_bytes(vertical_position, is_int=True)\r\n self.registers['BX'].set_bytes(button_status, is_int=True)", "def _pos(self):\n sw = self.parent.winfo_screenwidth()\n sh = self.parent.winfo_screenheight()\n w = sw * 0.8\n h = sh * 0.8\n x = (sw - w) / 2\n y = (sh - h) / 2\n self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def click(self, position):\n w, h = self.window.size\n sx, sy = self.tictactoe.size\n rx, ry = position\n x, y = sx * rx // w, sy * ry // h\n if self.tictactoe.available((x, y)):\n self.choice = (x, y)", "def click(self, X, Y):\n img = self.tower_imgs\n if self.x - img.get_width() // 2 + self.width >= X >= self.x - img.get_width() // 2:\n if self.y + self.height - img.get_height() // 2 >= Y >= self.y - img.get_height() // 2:\n return True\n return False", "def on_coin(self):\r\n if self.grid_pos in self.app.coins:\r\n if int(self.pix_pos.x+TOP_BOTTOM_BUFFER//2) % self.app.cell_width == 0:\r\n if self.direction == vec(1, 0) or self.direction == vec(-1, 0):\r\n return True\r\n # in the x-direction\r\n if int(self.pix_pos.y+TOP_BOTTOM_BUFFER//2) % self.app.cell_height == 0:\r\n if self.direction == vec(0, 1) or self.direction == vec(0, -1):\r\n return True\r\n # in the y-direction\r\n\r\n return False", "def update(self):\n # Requirement ID: 8.0.3\n\n rn1 = (random.randint(1,101))\n if rn1 < 70 and self.rect.right < self.screen_rect.right - 10:\n self.rect.y 
+= (rn1*1.000/25.000)\n\n rn2 = (random.randint(1,101))\n if rn2 <= 50 and self.rect.right < self.screen_rect.right - 10:\n self.rect.x += (rn2 / 90.0000) + 1\n\n if rn2 > 50 and self.rect.left > self.screen_rect.left + 10:\n self.rect.x -= ((100 - rn2) / 90.0000)", "def int_33H_4(self):\r\n horizontal_position = self.registers['CX'].get_int(-1)\r\n vertical_position = self.registers['DX'].get_int(-1)\r\n print(horizontal_position, vertical_position)\r\n MainWindow.set_cursor_poisition(horizontal_position, vertical_position)", "def click_a(self, event, x, y, flags, params):\n if event == cv2.EVENT_LBUTTONDOWN:\n self.image_a_coordinates = (x, y)\n print(\"ImageA selected coordinates =\", self.image_a_coordinates)\n return x, y", "def update(self):\n self.x = games.mouse.x\n #self.y = games.mouse.y\n self.check_collide()", "def correct_position(self):\n\n width = self.screen.get_width()\n height = self.screen.get_height()\n\n if self.last_screen_dimensions[\"width\"] > width:\n self.x -= self.last_screen_dimensions[\"width\"] - width\n\n if self.last_screen_dimensions[\"height\"] > height:\n self.y -= self.last_screen_dimensions[\"height\"] - height", "def HitTest(self, x, y):\r\n\r\n if self.target.GetScreenRect().Contains((x, y)):\r\n return wx.ALL\r\n\r\n return -1", "def update(self):\r\n self.x = 60\r\n self.y = games.mouse.y\r\n self.check_collide()", "def _check_button(self, mouse_pos):\r\n if self.display.easy_button.rect.collidepoint(mouse_pos):\r\n self.settings.set_difficulty(self.settings.easy)\r\n self.ai_game.start_game()\r\n elif self.display.normal_button.rect.collidepoint(mouse_pos):\r\n self.settings.set_difficulty(self.settings.normal)\r\n self.ai_game.start_game()\r\n elif self.display.hard_button.rect.collidepoint(mouse_pos):\r\n self.settings.set_difficulty(self.settings.hard)\r\n self.ai_game.start_game()\r\n elif self.display.quit_button.rect.collidepoint(mouse_pos):\r\n self.ai_game.quit()", "def move_to_start(self):\n self.pos = (SCREEN_WIDTH / 2, SCREEN_HEIGHT - 64)", "def click(self, pos):\n x, y = pos\n if (self.x - x) ** 2 + (self.y - y) ** 2 <= self.r ** 2:\n self.color = random.choice(COLORS)\n self.x = randint(100, 1000)\n self.y = randint(100, 800)\n self.r = randint(30, 50)\n self.speed_x = randint(-100, 100)\n self.speed_y = randint(-100, 100)\n return True\n else:\n return False", "def HitTest(self, x, y):\r\n\r\n if not self._useAero:\r\n if self.targetLeft.GetScreenRect().Contains((x, y)):\r\n return wx.LEFT\r\n if self.targetTop.GetScreenRect().Contains((x, y)):\r\n return wx.UP\r\n if self.targetRight.GetScreenRect().Contains((x, y)):\r\n return wx.RIGHT\r\n if self.targetBottom.GetScreenRect().Contains((x, y)):\r\n return wx.DOWN\r\n if self.targetCenter.IsValid() and self.targetCenter.GetScreenRect().Contains((x, y)):\r\n return wx.CENTER\r\n else:\r\n constants = [wx.LEFT, wx.UP, wx.RIGHT, wx.DOWN, wx.CENTER]\r\n lenRects = len(self._aeroRects)\r\n for indx, rect in enumerate(self._aeroRects):\r\n if rect.Contains((x, y)):\r\n if indx < lenRects or (indx == lenRects-1 and self._valid):\r\n return constants[indx]\r\n\r\n return -1", "def show_mouse_position_with_px(self):\n self.main_menu_greets_fonts = pygame.font.Font(os.path.join(PATH_TO_RESOURCE, 'font_forever.ttf'), 10)\n self.positiontext(f'Mouse position {pygame.mouse.get_pos()}', (770, 20))\n self.mouse = pygame.mouse.get_pos()\n return self.mouse", "def buttonDownPos(self, btn=None):\n if btn is None:\n btn = self.button()\n return 
Point(self.currentItem.mapFromScene(self._buttonDownScenePos[btn]))", "def on_mouse_click(self, event):\n if not self.is_game_over:\n try:\n # i, j coordinates of the click event\n i = int(round(event.ydata))\n j = int(round(event.xdata))\n\n # Left button\n if event.button == 1 or event.button == 2:\n self.reveal(i, j)\n\n # Right button\n elif event.button == 3:\n self.flag(i, j)\n\n except (TypeError, IndexError):\n pass", "def int_21H_B(self):\r\n\r\n char_present = self.GUI.is_key_pressed()\r\n\r\n if char_present:\r\n self.registers['AX'].move_into(255, 0, is_int=True)\r\n else:\r\n self.registers['AX'].move_into(0, 0, is_int=True)", "def offscreen(self, screen):\n if self.x < 0:\n return True\n elif self.y < 0:\n return True\n elif self.x + self.width > screen.get_width():\n return True\n elif self.y + self.height > screen.get_height():\n return True\n return False", "def ZombiesOnTheScreen(self):\n # Requirement ID: 8.0.2\n\n if (self.rect.right) >= self.screen_rect.right:\n self.settings.fleet_direction *= -1\n\n if (self.rect.left) <= self.screen_rect.left:\n self.settings.fleet_direction *= -1", "def time_to_move(self):\r\n if int(self.pix_pos.x+TOP_BOTTOM_BUFFER//2) % self.app.cell_width == 0:\r\n if self.direction == vec(1, 0) or self.direction == vec(-1, 0) or self.direction == vec(0, 0):\r\n return True\r\n # for the x-direction\r\n\r\n if int(self.pix_pos.y+TOP_BOTTOM_BUFFER//2) % self.app.cell_height == 0:\r\n if self.direction == vec(0, 1) or self.direction == vec(0, -1) or self.direction == vec(0, 0):\r\n return True\r\n # for the y-direction\r\n\r\n # checks to see if the player is still within the bounds\r", "def click(self, event):\n now = time.time() * 1000\n if self.time - now > 1000: \n return None\n if distance(self.x, event.x, self.y, event.y) > self.size:\n return False\n elif self.time - now > 500:\n return 50\n elif self.time - now > 200:\n return 100\n else: \n return 200", "def update(self):\n pygame.event.pump()\n self.pos_x -= 1.5", "def is_visible(self, position, size=0):\n # return True\n size /= self.scale # size is in pixel\n in_x = (self.focus.x + self.offset.x / self.scale - size <=\n position.x <=\n self.focus.x - self.offset.x / self.scale + size)\n in_y = (self.focus.y + self.offset.y / self.scale - size <=\n position.y <=\n self.focus.y - self.offset.y / self.scale + size)\n # if name == \"earth\":\n # print(\"{:+e} {:+e} {}\".format(self.focus.y + self.offset2.y\n # , position.y, in_y))\n # print(\"{:+e} {:+e}\".format(self.focus.x, self.focus.y))\n return in_x and in_y", "def follow(self):\n\t\tpos = pygame.mouse.get_pos()\n\t\tself.x = pos[0]\n\t\tself.y = pos[1]\n\t\tself.draw()", "def world_to_screen(self, x, y):\n return x-self.x, self.h-(y-self.y)", "def movement(self, screen):\n if self.tx is not None and self.ty is not None: # Target is set\n\n X = self.x - self.tx\n Y = self.y - self.ty\n\n if X < 0: # --->\n self.img = pygame.image.load(next(self.walking_east_images))\n self.x += self.velocity\n elif X > 0: # <----\n self.img = pygame.image.load(next(self.walking_west_images))\n self.x -= self.velocity\n if Y > 0: # up\n self.img = pygame.image.load(next(self.walking_north_images))\n self.y -= self.velocity\n elif Y < 0: # dopwn\n self.img = pygame.image.load(next(self.walking_south_images))\n self.y += self.velocity\n screen.blit(self.img, (self.x, self.y))\n\n if X == 0 and Y == 0:\n self.tx, self.ty = None, None\n self.agent.actionCompleted()", "def update(self):\n \n if games.keyboard.is_pressed(games.K_LEFT):\n #check if we reach 
the edge of the screen, so we do not pass border\n if self.x == 20 or self.x == games.screen.width-20:\n self.x = 40\n else:\n self.x -= 2\n \n if games.keyboard.is_pressed(games.K_RIGHT):\n #check if we reach the edge of the screen, so we do not pass border\n if self.x == 20 or self.x == games.screen.width-20:\n self.x = games.screen.width-40\n else:\n self.x += 2 \n \n if games.keyboard.keypress(games.K_z):\n self.fire_bullet()\n \n self.get_hit()\n \n if self.score.value == 0:\n self.end_game()", "def button_on(self, value: int):\n if value is 1:\n self.button_1 = arcade.Sprite(settings.button_pressed, .7, 0, 0, 0,\n 0, 50, 570)\n elif value is 2:\n self.button_2 = arcade.Sprite(settings.button_pressed, .7, 0, 0, 0,\n 0, 50, 75)\n elif value is 3:\n self.button_3 = arcade.Sprite(settings.button_pressed, .7, 0, 0, 0,\n 0, 750, 570)\n elif value is 4:\n self.button_4 = arcade.Sprite(settings.button_pressed, .7, 0, 0, 0,\n 0, 750, 75)", "def button_a(self) -> bool:\n return bool(self.pressed & 0x2)", "def move(self):\n self.val = (pygame.mouse.get_pos()[\n 0] - self.xpos - 10) / 80 * (self.maxi - self.mini) + self.mini\n if self.val < self.mini:\n self.val = self.mini\n if self.val > self.maxi:\n self.val = self.maxi", "def click_car(self, pos):\n a = self.h / 50\n x, y = pos\n if ((x > self.x) and (x < self.x + 260 * a) and (y > self.y - 40 * a)\n and (y < self.y + self.h + 25 * a)):\n self.x = randint(200, 500)\n self.y = randint(200, 500)\n self.h = randint(10, 50)\n self.dir = 1\n self.speed_x = randint(10, 200)\n return True\n else:\n return False", "def _get_loc_top_left_placement_b(button_number):\r\n if button_number == 0:\r\n return 35 / 2, 7 / 2 # placement button that starts the game\r\n\r\n elif button_number == 1:\r\n return 35 / 2, 13 / 2 # placement button that brings program back to main menu\r", "def movement(self):", "def keyboard_menu_control(self, app):\n mx, my = pg.mouse.get_pos()\n click = False\n\n menu_view = self.get_view.menu_view\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit()\n sys.exit()\n\n if event.type == pg.MOUSEBUTTONDOWN:\n if event.button == 1:\n click = True\n\n if menu_view.menu_button.collidepoint((mx, my)):\n if click:\n app.menu_view_running = False\n\n if menu_view.quit_button.collidepoint((mx, my)):\n if click:\n pg.quit()\n sys.exit(0)", "def click(self, X, Y):\n tower_image = self.tower_images[self.level - 1]\n\n if X <= self.x + tower_image.get_width() // 2 - 2 * self.extra_padding and X >= self.x - tower_image.get_width() // 2 + self.extra_padding // 2:\n if self.name in TowerConstants.MAGIC_TOWER_NAMES or self.name in TowerConstants.SUP_TOWER_NAMES:\n if Y <= self.y + self.height // 2 - (2 * self.extra_padding) and Y >= self.y - self.height // 2 + (2 * self.extra_padding):\n return True\n else:\n if Y <= self.y + self.height // 2 - (4 * self.extra_padding) and Y >= self.y - self.height // 2 + (2 * self.extra_padding):\n return True\n return False", "def selection_board_maintenance(self,x_cor,y_cor):\t\t\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.display.quit()\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit() \r\n\r\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\t\t\t\t#print(\"mouse is pressed\")\r\n\t\t\t\t#everything begins here\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 
'selection_bar')\r\n\t\t\t\t#print(who_is_clicked)\r\n\t\t\t\tif (self.selected_from_selection_bar + self.selected_from_board):\r\n\t\t\t\t\t#print(\"inside selected item one\")\r\n\t\t\t\t\tif Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_board = False\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position =Helping_Class.selection_bar_reverse_mapping[piece]\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t#print(\"nothing is selected\")\r\n\t\t\t\t\t#check if clicked on his piece change then select it\r\n\t\t\t\t\tif Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position =(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t#print(self.selected_piece,self.selected_position,self.selected_from_selection_bar)\r\n\r\n\t\t\t\t\t\t\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\t\t\r\n\t\t\t\t\r\n\t\t\telse:\r\n\t\t\t\t#color change\r\n\t\t\t\t#who_is_clicked is dummy variable as no click has occurred\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'selection_bar')\r\n\r\n\t\t\t\tself.blit_piece = [(x_adjusted,y_adjusted),piece]", "def check_clicked(self, events):\n x = self.x\n y = self.y\n xsize = self.xsize\n ysize = self.ysize\n (a, b) = pygame.mouse.get_pos()\n if a>x and b>y and a<x+xsize and b<y+ysize:\n for event in events:\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.clickedAction(events)\n self.clicked = True\n return self.clicked", "def update(self):\r\n if self.right > games.screen.width: #or self.left < 0:\r\n self.dx = -self.dx\r\n\r\n if self.bottom > games.screen.height or self.top < 0:\r\n self.dy = -self.dy\r\n\r\n self.handle_collide()\r\n\r\n if self.left < 60:\r\n self.end_game()", "def update_screen_loc(self):\n old_loc = list(self.loc)\n\n self.loc = self.next_loc()\n if self.loc != old_loc:\n for action in self.actions:\n if action == Action.running:\n if self.current_speed < self.max_speed:\n self.current_speed += 0.5\n elif action == Action.slide:\n self.velocity -= 0.5\n self.current_speed = self.velocity\n if self.velocity <= 0:\n self.remove_action(Action.slide)\n self.remove_action(Action.damaged)\n elif action == Action.jumping:\n if self.jump_charge > self.jump_start_charge:\n self.jump_charge -= 1\n else:\n self.remove_action(Action.jumping)\n elif action == Action.attack:\n self.attack_charge -= 1\n if self.attack_charge <= 0:\n self.remove_action(Action.attack)\n\n self.redraw = True", "def menu(self):\n self.font = pygame.font.SysFont(\"comicsansms\", 72)\n i = 0\n self.window.fill((60,50,20))\n self.menu_buttons = [0,0,0]\n for i in range(3):\n self.menu_buttons[i] = pygame.draw.rect(self.window, (255, 255, 255), (270, 200+i*120, 300, 100)) \n text = self.font.render(\"H x H\", True, (0, 128, 0))\n self.window.blit(text, (330, 200))\n text = self.font.render(\"H x C\", True, (0, 128, 0))\n self.window.blit(text, (330, 320))\n text = 
self.font.render(\"C x C\", True, (0, 128, 0))\n self.window.blit(text, (330, 440))\n pygame.display.flip()\n while True: \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.quit = True\n return\n elif event.type == pygame.KEYDOWN:\n if evenvt.key == pygame.K_r:\n self.play()\n elif event.key == pygame.K_t:\n self.tip()\n elif event.key == pygame.K_m:\n self.__init__()\n elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n pos = pygame.mouse.get_pos()\n for i in range(len(self.menu_buttons)):\n if self.menu_buttons[i].collidepoint(pos):\n y_pos=pos[1]\n difficulty=0\n if y_pos<=300 and y_pos>=200:\n return [1,difficulty]\n elif y_pos<=420 and y_pos>=320:\n difficulty=self.difficulty_menu(\"Computer difficulty:\")\n return [2,difficulty]\n elif y_pos<=540 and y_pos>=420:\n difficulty_bot1=self.difficulty_menu(\"Computer 1 difficulty:\")\n difficulty_bot2=self.difficulty_menu(\"Computer 2 difficulty:\")\n return [3,difficulty_bot1, difficulty_bot2]", "def update(self):\r\n self.x = games.mouse.x\r\n self.y = games.mouse.y\r\n self.check_collide()", "def _onmove(self, event):", "def button2(msg,x,y,w,h,ic,ac,action=None): #de button die wordt gebruikt als je een onzichtbare button wilt\r\n mouse = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n if x+w > mouse[0] > x and y+h > mouse[1] > y: #als de muis over de knop hovert\r\n\r\n if click[0] == 1 and action != None: #als je er op klikt, doe actie\r\n action()\r\n\r\n smallText = pygame.font.SysFont(\"freesansbold.ttf\",20)\r\n textSurf, textRect = text_objects(msg, smallText)\r\n textRect.center = ( (x+(w/2)), (y+(h/2)) )\r\n gameDisplay.blit(textSurf, textRect)", "def center_ava(self):\n\t\tself.rect.midbottom = self.screen_rect.midbottom\n\t\tself.x = float(self.rect.x)", "def move_to_position2(self):", "def update_menu(self):\n x_pos = (self.menu_x - self.block_x) // 2 + self.offset_x\n y_pos = self.offset_y + 50 # account for bottom text\n self.menu_sprites[0].image = self.sprite_types[self.curr_sprite]\n for sprite in self.menu_sprites:\n sprite.x = x_pos\n sprite.y = y_pos + sprite.image.height\n sprite.update()\n y_pos += self.offset_y + sprite.image.height", "def moving(self):\n self.animation()\n assert(self.rect.x % 32 == 0 or self.rect.y % 32 == 0), \\\n 'Not centered on tile'", "def __map_button(self, button):\n _, start_code, start_value = button\n value = start_value\n ev_type = \"Key\"\n code = self.manager.codes['xpad'][start_code]\n if 1 <= start_code <= 4:\n ev_type = \"Absolute\"\n if start_code == 1 and start_value == 1:\n value = -1\n elif start_code == 3 and start_value == 1:\n value = -1\n return code, value, ev_type", "def on_click(self, x, y):\n mul_x, mul_y = self.multiplier\n off_x, off_y = self.offset\n x -= off_x\n x /= mul_x\n y -= off_y\n y /= mul_y\n for button in self.button_dict.values():\n button.check_click(x, y)", "def is_visible(self):\n return self.rect.x < self.screen_rect.width", "def get_pix_pos(self):\r\n return vec((self.grid_pos[0]*self.app.cell_width)+TOP_BOTTOM_BUFFER//2+self.app.cell_width//2,\r\n (self.grid_pos[1]*self.app.cell_height) +\r\n TOP_BOTTOM_BUFFER//2+self.app.cell_height//2)\r\n # where Pac-Man starts relative to the board\r", "def key_hooks(self, key):\n if key == pygame.K_LEFT:\n self.bias[0] -= 10\n elif key == pygame.K_RIGHT:\n self.bias[0] += 10\n elif key == pygame.K_DOWN:\n self.bias[1] += 10\n elif key == pygame.K_UP:\n self.bias[1] -= 10\n elif key == pygame.K_EQUALS:\n self.scale[0] += 5\n self.scale[1] += 5\n 
self.scale[2] += 5\n elif key == pygame.K_MINUS:\n self.scale[0] -= 5\n self.scale[1] -= 5\n self.scale[2] -= 5\n elif key == pygame.K_q:\n self.view_angle[0] += .1\n elif key == pygame.K_w:\n self.view_angle[0] -= .1\n elif key == pygame.K_a:\n self.view_angle[1] += .1\n elif key == pygame.K_s:\n self.view_angle[1] -= .1\n elif key == pygame.K_z:\n self.view_angle[2] += .1\n elif key == pygame.K_x:\n self.view_angle[2] -= .1", "def off_screen(self):\n return self._x < 0", "def move_to_position1(self):", "def HitTest(self, x, y):\r\n\r\n return 0", "def update(self):\n if games.keyboard.is_pressed(games.K_RIGHT):\n self.x += 1\n if games.keyboard.is_pressed(games.K_a):\n self.x -= 1", "def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n return\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n self.running = False\n return\n elif event.key == pygame.K_LEFT:\n if (self.xv, self.yv) != (1, 0):\n self.xv, self.yv = -1, 0\n return\n elif event.key == pygame.K_RIGHT:\n if (self.xv, self.yv) != (-1, 0):\n self.xv, self.yv = 1, 0\n return\n elif event.key == pygame.K_UP:\n if (self.xv, self.yv) != (0, 1):\n self.xv, self.yv = 0, -1\n return\n elif event.key == pygame.K_DOWN:\n if (self.xv, self.yv) != (0, -1):\n self.xv, self.yv = 0, 1\n return", "def update(self):\n self.x = games.mouse.x\n self.y = games.mouse.y\n self.check_collide()", "def is_pressed(self) -> bool:", "def go_left(self):\n self.rect.centerx -= 9", "def ingame_to_scene(self, x, y):\n new_x = (x*8)+4\n # TODO: this y coord may be slightly off\n new_y = (self.world.height*8) - (y*8) - 4\n # Okay, seems we don't actually need this here, for what we're using\n # it for, at least. May want to rename or refactor these a bit so\n # that these two functions are analagous, 'cause they technically do\n # slightly different things now.\n #(scene_x, scene_y) = self.mainwindow.get_zoom_transform().map(new_x, new_y)\n #return (scene_x, scene_y)\n return (new_x, new_y)", "def update(self, pressed_key):\n if pressed_key[K_UP]:\n self.rect.move_ip(0, -5) # move in place\n if pressed_key[K_DOWN]:\n self.rect.move_ip(0, 5)\n if pressed_key[K_LEFT]:\n self.rect.move_ip(-5, 0)\n if pressed_key[K_RIGHT]:\n self.rect.move_ip(5, 0)\n\n if self.rect.left < 0:\n self.rect.left = 0\n if self.rect.right > screen_width:\n self.rect.right = screen_width\n if self.rect.top <= 0:\n self.rect.top = 0\n if self.rect.bottom >= screen_height:\n self.rect.bottom = screen_height", "def update(self):\n self.x += 0.1\n self.rect.centerx = self.x\n\n if self.rect.left >= self.screen_rect.right:\n self.x -= self.screen_rect.width + self.rect.width\n self.rect.y = randint(0, self.screen_rect.height)", "def wasClicked(self, point):\n p1 = self.rect.getP1()\n p2 = self.rect.getP2()\n if (p1.getX() <= point.getX() <= p2.getX() and\n p1.getY() <= point.getY() <= p2.getY()):\n return True\n return False", "def Canvas_onclick(event):\n global ix, iy\n ix, iy = event.xdata, event.ydata\n print 'x = %f -> i = %d, y = %f' % (ix,ix/0.5*fig.Fulllength, iy)\n\n global coords\n coords = [ix, iy]\n\n return coords", "def update(self):\r\n self.x = games.mouse.x\r\n self.y = games.mouse.y\r\n self.check_collide() # New for Rev2.0\r\n \r\n if self.left <0:\r\n self.left = 0\r\n \r\n if self.right > games.screen.width:\r\n self.right = games.screen.width\r\n \r\n self.check_collide()", "def outOfScreen(self):\n x,y = self.currentLevel.transformToScreenCoordinate(self.position)\n w,h = 
cblocals.GAME_SCREEN_SIZE\n if x<0 or y<0 or x>x or y>h:\n return True\n return False", "def scene_to_ingame(self, x, y):\n (scene_x, scene_y) = self.mainwindow.get_inverted_zoom_transform().map(x, y)\n new_x = scene_x//8\n # TODO: this y coord may be slightly off\n new_y = ((self.world.height*8) - scene_y)//8\n return (new_x, new_y)", "def check_in_screen(self):\n if self.rect.colliderect(screen_rect) and not self.moving:\n return True\n return False", "def update(self): \n super().update()\n if self.center_x < constants.left_limit:\n self.center_x = self.screen_width + constants.offscreen_space\n if self.center_x > self.screen_width + constants.offscreen_space:\n self.center_x = constants.left_limit\n if self.center_y > self.screen_height + constants.offscreen_space:\n self.center_y = constants.bottom_limit\n if self.center_y < constants.bottom_limit:\n self.center_y = self.screen_height + constants.offscreen_space", "def click(self, x: float, y: float) -> bool:\n\n if self.x <= x <= self.x + self.width and self.y <= y < self.y + self.height:\n return True\n\n return False" ]
[ "0.6537602", "0.6219583", "0.6192906", "0.6191912", "0.6028021", "0.6004765", "0.59921515", "0.59902275", "0.5902409", "0.58934194", "0.58909214", "0.58835", "0.5861542", "0.5846216", "0.5830137", "0.580614", "0.5786481", "0.57471406", "0.57405293", "0.5723758", "0.56889206", "0.5688247", "0.5682304", "0.56425667", "0.56294245", "0.5628964", "0.56058913", "0.5594907", "0.55824447", "0.55784506", "0.55689573", "0.55544233", "0.5537898", "0.5533606", "0.55335623", "0.5521749", "0.5520601", "0.55159616", "0.55042535", "0.5503647", "0.55034536", "0.54954624", "0.5486188", "0.5483328", "0.5478527", "0.5464501", "0.5458971", "0.54536957", "0.544624", "0.54429024", "0.54397446", "0.54367083", "0.5429021", "0.5424539", "0.5421252", "0.54144865", "0.54087853", "0.5407482", "0.54068387", "0.5405369", "0.5403695", "0.5402924", "0.53994185", "0.53966814", "0.5376809", "0.5374272", "0.53708667", "0.5365353", "0.53615105", "0.5361356", "0.5357899", "0.53577244", "0.53559726", "0.53536415", "0.5348464", "0.5346842", "0.534316", "0.5343102", "0.53422856", "0.5341419", "0.53369176", "0.5334818", "0.5334774", "0.53313893", "0.532901", "0.5325545", "0.5319334", "0.53092486", "0.5303854", "0.53024906", "0.5299793", "0.52949136", "0.52931416", "0.52921504", "0.5291407", "0.5286212", "0.528441", "0.5283964", "0.52825284", "0.52794737" ]
0.5523286
35
Called when a user releases a mouse button.
def on_mouse_release(self, x, y, button, modifiers):
    menu: Menu = self.get_menu_for_display()
    menu_click_x, menu_click_y = self.get_menu_click(menu, x, y)
    if button == arcade.MOUSE_BUTTON_LEFT:
        if menu:
            menu.button_list.check_mouse_release_for_buttons(
                menu_click_x,
                menu_click_y,
            )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mouse_release_event(self, x: int, y: int, button: int):\n pass", "def on_mouse_release(self, x, y, button):\n pass", "def on_mouse_release(self, x, y, button, key_modifiers):\r\n pass", "def OnMouseUp(self, evt):\n self.ReleaseMouse()", "def release():\n gui.mouseUp()", "def mouse_release(self):\n\n # play button press\n if self.play_button.is_active:\n # change to gameplay\n self.switch_context(game.GameContext)", "def emitReleaseEvent(self, clickLocation, button, currentKbKey, items):\n # emit the mouseReleaseEvent signal\n self.mouseRelease.emit(self, clickLocation, button, currentKbKey, items)", "def button_release_event(self, widget, event):\n x, y = event.x, event.y\n\n # x, y = coordinates where the button was released\n self.last_win_x, self.last_win_y = x, y\n\n button = 0\n # prepare button mask as in button_press_event()\n\n data_x, data_y = self.check_cursor_location()\n\n return self.make_ui_callback('button-release', button, data_x, data_y)", "def release(button='left', coords=(0, 0)):\n _perform_click_input(button=button, coords=coords, button_down=False, button_up=True)", "def button_release(self, event: Any) -> None:\n if event.button == 1:\n self.left_button_down = False\n if event.button == 2:\n self.middle_button_down = False\n if event.button == 3:\n self.right_button_down = False", "def mouseReleased():\n if not game_controller.game_over:\n if game_controller.falling_disk and \\\n game_controller.falling_disk.y_vel == 0:\n game_controller.handle_mouseReleased()", "def _OnMplMouseRelease( self, ev ):\n if ev.button == 3:\n ev.guiEvent.Skip()", "def on_release(self):\n self.pressed = False", "def on_release(self):\n self.pressed = False", "def ev_mousebuttonup(self, event: MouseButtonUp) -> None:", "def mouseReleaseEvent( self, event ):\n event.setAccepted(False)\n if self._hotspotPressed:\n event.accept()\n self._hotspotPressed = False\n return\n \n # ignore events when the scene is in view mode\n scene = self.scene()\n if ( self.isLocked() or self._ignoreMouseEvents or \\\n (scene and (scene.inViewMode() or scene.isConnecting()))):\n event.ignore()\n self._ignoreMouseEvents = False\n return\n \n super(XNode, self).mouseReleaseEvent(event)\n \n # emit the geometry changed signal\n self.emitGeometryChanged()\n \n # unblock the selection signals\n if ( scene ):\n scene.blockSelectionSignals(False)\n \n delta = datetime.datetime.now() - self._pressTime\n if not scene.signalsBlocked() and delta.seconds < 1:\n scene.nodeClicked.emit(self)", "def ev_mousebuttondown(self, event: MouseButtonDown) -> None:", "def mouseReleaseEvent(self, event):\n # super(PlotWidget, self).mouseReleaseEvent(event)\n event.accept()", "def ev_mousebuttonup(self, event: tcod.event.MouseButtonUp) -> T | None:", "def __mouse_release(self, event, right_click=False):\n global choose_rectangle\n if right_click:\n return\n if choose_rectangle:\n self.__finish_rectangle(event)", "def ev_mousebuttondown(self, event: tcod.event.MouseButtonDown) -> T | None:", "def mouseReleaseEvent(self, event):\n button = event.button()\n\n # select an item on which we clicked\n item = self.itemAt(event.x(), event.y())\n if item:\n self.setCurrentItem(item)\n if button == 1:\n print \"SIMPLE LEFT CLICK\"", "def mouseReleaseEvent(self, event):\n if event.button() is not QtCore.Qt.MouseButton.LeftButton:\n return False\n if self.mousenode is not None:\n self.remove_mousenode(event)\n return QtGui.QGraphicsScene.mouseReleaseEvent(self, event)", "def mouseReleaseEvent(self, event):\n super(QIntSpinner3DS, 
self).mousePressEvent(event)\n super(QIntSpinner3DS, self).mouseReleaseEvent(event)\n self.unsetCursor()", "def mouseReleaseEventEnabled(self, ev):\n\n self._btns.remove(ev.button())", "def mouseReleaseEvent(self, event: QMouseEvent):\n self._moving = False\n self.rectChanged.emit(self._rect)\n super().mouseReleaseEvent(event)", "def button_release_cb(self, darea, event):\n self.oldx, self.oldy = event.x, event.y\n self.draw_pointer(self.cr, None, None)\n self.queue_draw()\n self.oldx, self.oldy = None, None\n self.emit('end-dnd')\n return True", "def mouseDragged():\n if mousePressed:\n mousePressed()", "def down(self, event):\n self.dragx = event.x\n self.dragy = event.y\n self.canvas.bind(\"<B1-Motion>\", self.motion)\n self.canvas.bind(\"<ButtonRelease-1>\", self.up)\n return True", "def m_release(self, button: MButton):\n pass", "def HandleRelease(self, event: tkEvent):\n pass", "def _release(self, event):", "def on_mouse_press(self, x, y, button):\n\n pass", "def unpress(self):\n if self.unclick:\n self.clicked = False", "def ev_MOUSEUP(self, event):", "def mouseReleased(self, _evt, _id):\n if not self.is_enabled: return False\n \n self.mouse_icon.mouseReleased(_evt, _id)\n return False", "def mouseReleaseEvent(self, event):\n self.dragging = False\n self.parent.unsetCursor()\n if self.moved:\n self.draw_visible_area()\n self.moved = False\n else:\n if self.cur_hover:\n dialog = TileInfoDialog(self.parent, self.cur_hover, self.mainwindow.config)\n dialog.exec()\n\n # Re-focus the main window\n self.mainwindow.activateWindow()", "def keyReleaseEvent(self, event):\n self.game_engine.input_manager.keyReleaseEvent(event)", "def on_mouse_release(self, x: float, y: float, button: int, modifiers: int):\n if self.heldLetter is not None:\n self.active_blocks.remove(self.heldLetter)\n self.moving_blocks.append(self.heldLetter)\n if len(arcade.get_sprites_at_point((x, y), self.inactive_blocks)) == 0 and x < BOARD_WIDTH:\n letter_x, letter_y = self.nearest_cell(x, y)\n self.heldLetter.place(letter_x, letter_y)\n self.board_temp[int((letter_x-SLOT_WIDTH/2)/SLOT_WIDTH)][int((letter_y - SLOT_HEIGHT/2)/SLOT_HEIGHT)] = self.heldLetter\n else:\n self.heldLetter.return_home()\n self.heldLetter = None", "def on_mouse_release(self, x: float, y: float, button, modifiers):\n #dialogue buttons\n check_mouse_release_for_buttons(x, y, self.levels[self.current_level].dialogue_list)\n\n #room info prompt buttons\n check_mouse_release_for_buttons(x, y, self.levels[self.current_level].room_info_list)", "def handle_mouse_press(self, event):", "def mouseReleaseEvent(self, ev):\n\n # handle the built mouse events first\n\n # panning...\n if self.panning and (ev.button() == Qt.LeftButton):\n # we're done panning\n self.leftBtnClicked = False\n self.setCursor(Qt.OpenHandCursor)\n self.lastPanPoint = QPoint()\n\n # \"auto\" rubber banding...\n elif self.rubberBandKey and self.rubberBanding:\n\n # end the rubber band selection\n rubberBandRect = self.endRubberBand().toRect()\n\n # check if the user selected anything\n if (rubberBandRect):\n items = self.items(rubberBandRect)\n\n # filter the selected items\n items = self.filterSelectedItems(items)\n\n # If we're handling selections deal with the selection states of our marks\n if self.doSelections:\n\n for item in self.selectedItems:\n item.setSelected(False)\n for item in items:\n item.setSelected(True)\n self.selectedItems = items\n\n # call the emit method - we don't directly emit here in case a child class\n # wants to transform the data before emitting it.\n 
self.emitRubberbandSelection(rubberBandRect, items)\n\n else:\n # This event isn't handled by automatically - emit a release event\n clickLocation = self.mapToScene(ev.pos())\n\n # do a \"sloppy selection\" and return all items that intersect our\n # selection rectangle. The selection rectangle is set by calling\n # the setSelectionRadius method.\n\n # move our selection rectangle into place - depending on the size of\n # the selection area, this may not be centered on the click location\n areaLoc = ev.pos() - self.selectionRadius\n self.selectionArea.moveTo(areaLoc)\n\n # check if the user clicked on anything - this will return a list of\n # items that intersect the selection rectangle.\n items = self.items(self.selectionArea)\n\n # filter the selection so we only return marks or text not associated\n # with a mark.\n items = self.filterSelectedItems(items)\n\n # call the emit method - we don't directly emit here in case a child class\n # wants to transform the data before emitting it.\n self.emitReleaseEvent(clickLocation, ev.button(), self.currentKbKey, items)", "def mouse_out(self):\n pass", "def mouseReleaseEvent(self, event):\n self.box_begin = self.begin\n self.box_end = event.pos()\n self.begin = event.pos()\n self.end = event.pos()\n if not self.permanent_show:\n self.update()", "def rightButtonUp(self):\n\t\tautopy.mouse.toggle(False,autopy.mouse.RIGHT_BUTTON)", "def _release(self, event):\n self._set_cursor(False)\n # self._pressv is deprecated but we still need to maintain it\n self._pressv = None\n\n if not self._interactive:\n self._selection_artist.set_visible(False)\n\n if (self._active_handle is None and self._selection_completed and\n self.ignore_event_outside):\n return\n\n vmin, vmax = self.extents\n span = vmax - vmin\n\n if span <= self.minspan:\n # Remove span and set self._selection_completed = False\n self.set_visible(False)\n if self._selection_completed:\n # Call onselect, only when the span is already existing\n self.onselect(vmin, vmax)\n self._selection_completed = False\n else:\n self.onselect(vmin, vmax)\n self._selection_completed = True\n\n self.update()\n\n self._active_handle = None\n\n return False", "def exit_on_click(self):\n self.get_mouse()\n self._close()", "def on_canvas_mouse_release(self, event) -> None:\r\n\r\n self.edit_toggle_mode = None", "def mouse_press_event(self, x: int, y: int, button: int):\n pass", "def key_release_event(self, event):\n pass", "def ev_MOUSEDOWN(self, event):", "def check_mouse_release_for_buttons(x: float, y: float, button_list: list):\n for button in button_list:\n if button.pressed:\n #sets button pressed to false\n button.on_release()", "def keyReleaseEvent (self, event):\n super(DiagramScene, self).keyReleaseEvent(event)", "def _ReleaseAllButtons(self):\n self._kit.MouseReleaseAllButtons()\n time.sleep(self.send_delay)", "def input(self, event: pygame.event) -> None:\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n self.user_clicked = True", "def check_mouse_release_for_buttons(_x, _y, button_list):\n for button in button_list:\n if button.pressed:\n button.on_release()", "def ev_joybuttondown(self, event: tcod.event.JoystickButton) -> T | None:", "def OnTokenButtonRelease(self, event):\n self._drag_data = {\"x\": 0, \"item\": None}\n\n # Rebind the main GUI buttons because they are unbinded while dragging the beats\n self.myMainGUI.root.after(200, self.myMainGUI.bindButtons)", "def leftButtonUp(self):\n\t\tautopy.mouse.toggle(False,autopy.mouse.LEFT_BUTTON)", "def mouseReleaseEvent(self, 
event):\n width = self.frameGeometry().width()\n height = self.frameGeometry().height()\n cursor = QtGui.QCursor()\n new_pos = self.mapFromGlobal(cursor.pos())\n x = new_pos.x()\n y = new_pos.y()\n self.__selector_y = y/float(height) # normalized value of the y position\n \tself.__selector_x = x/float(width) #normalised value of the x position\n self.updatePixelColor()\n self.repaint()", "def releaseKeyButtons(self):\n self._myKey.removeKeyButtonEvent([\n CONFIG_KEY.BUTTON_ACT_A,\n CONFIG_KEY.BUTTON_ACT_B,\n CONFIG_KEY.BUTTON_JOY_UP,\n CONFIG_KEY.BUTTON_JOY_DOWN,\n CONFIG_KEY.BUTTON_JOY_LEFT,\n CONFIG_KEY.BUTTON_JOY_RIGHT,\n CONFIG_KEY.BUTTON_JOY_OK\n ])", "def on_mouse_up(self, pos, mouse_button):\n for item in button.Button.all_buttons:\n if item.collidepoint(pos):\n self.buttons_clicked.append((item, mouse_button))\n item.on_click(mouse_button)", "def on_mouse_press(self, x, y, button, key_modifiers):\r\n pass", "def on_key_release(self, key_released: int, _: int) -> None:\n if key_released in (key.LEFT, key.RIGHT, key.A, key.D):\n self.change_x = 0\n self.direction = None", "def mouseRelease(self, evt):\n if self.__enabled and self.__indicator.isVisible():\n if not self.__indicatorGlobalRect().contains(\n evt.globalPos()):\n self.__stopScrolling()\n return True\n \n return False", "def mouseReleaseEvent(self, ev):\n super(PlotObject, self).mouseReleaseEvent(ev)\n if self._downpos == ev.pos():\n x = ev.pos().x()\n y = ev.pos().y()\n if ev.button() == 2 :\n self.mPosition()\n elif ev.button() == 1:\n x = x - self.width() / 2\n y = y - self.height() / 2\n #self.pan(-x, -y, 0, relative=True)\n print(self.opts['center'])\n print(x,y)\n self._prev_zoom_pos = None\n self._prev_pan_pos = None", "def up(self, event):\n event.widget.unbind (\"<B1-Motion>\")\n event.widget.unbind (\"<ButtonRelease-1>\")\n self.diag.update_arrows()", "def handle_mousehold(self, button, name):\r\n if widget.Widget.handle_mousehold(self, button, name):\r\n app.App.handle_mousehold(self, button, name)\r\n return True\r\n return False", "def mouse_right_up(self):\n pass", "def isButtonReleased() -> bool:\n pass", "def handle_release(self, x, y):\n # append new line to list of lines\n self.lines.append( (self.first_point, (x, y)) )\n\n # clear mouse pressed flag and rubber band line coords\n self.pressed_flag = False\n self.first_point = None\n self.last_point = None\n\n # trigger canvas to redraw itself\n self.redraw()", "def onRelease(event):\r\n global initPos\r\n initPos = None # Reset the position ready for next click\r", "def _onrelease(self, item, fun, num=1, add=None):\n if fun is None:\n self.cv.tag_unbind(item, \"<Button%s-ButtonRelease>\" % num)\n else:\n def eventfun(event):\n x, y = (self.cv.canvasx(event.x)/self.xscale,\n -self.cv.canvasy(event.y)/self.yscale)\n fun(x, y)\n self.cv.tag_bind(item, \"<Button%s-ButtonRelease>\" % num,\n eventfun, add)", "def mouseReleaseEvent (self, event):\n if self.itemMoved:\n self.parentWidget.DataChanged.emit()\n self.itemMoved = False; \n super(DiagramItem, self).mouseReleaseEvent(event)", "def rightButtonDown(self):\n\t\tautopy.mouse.toggle(True,autopy.mouse.RIGHT_BUTTON)", "def mouse_right_down(self):\n pass", "def keyReleaseEvent(self, ev):\n self.currentKbKey = None\n\n if (ev.key() == self.panKey):\n # disable Pan/Zoom mode\n self.panning = False\n if self.__pointerLeftWidget:\n # we've left the widget - reset the cursor to the standard arrow\n self.setCursor(Qt.ArrowCursor)\n else:\n self.setCursor(self.defaultCursor)\n elif (ev.key() == self.selectAddKey):\n # 
disable selection add mode\n if self.__pointerLeftWidget:\n # we've left the widget - reset the cursor to the standard arrow\n self.setCursor(Qt.ArrowCursor)\n else:\n self.setCursor(self.defaultCursor)\n elif (ev.key() == self.zoomKey):\n # disable zoom mode\n self.__zooming = False\n else:\n self.keyRelease.emit(self, ev)", "def leftButtonDown(self):\n\t\tautopy.mouse.toggle(True,autopy.mouse.LEFT_BUTTON)", "def down(self, *args):\n self.cur_win().down()", "def mouseReleaseEvent(self, event):\n # Ignore dummy events. e.g., Faking pan with left button ScrollHandDrag.\n dummyModifiers = Qt.KeyboardModifier(Qt.KeyboardModifier.ShiftModifier | Qt.KeyboardModifier.ControlModifier\n | Qt.KeyboardModifier.AltModifier | Qt.KeyboardModifier.MetaModifier)\n if event.modifiers() == dummyModifiers:\n QGraphicsView.mouseReleaseEvent(self, event)\n event.accept()\n return\n\n # Finish dragging a region zoom box?\n if (self.regionZoomButton is not None) and (event.button() == self.regionZoomButton):\n QGraphicsView.mouseReleaseEvent(self, event)\n zoomRect = self.scene.selectionArea().boundingRect().intersected(self.sceneRect())\n # Clear current selection area (i.e. rubberband rect).\n self.scene.setSelectionArea(QPainterPath())\n self.setDragMode(QGraphicsView.DragMode.NoDrag)\n # If zoom box is 3x3 screen pixels or smaller, do not zoom and proceed to process as a click release.\n zoomPixelWidth = abs(event.pos().x() - self._pixelPosition.x())\n zoomPixelHeight = abs(event.pos().y() - self._pixelPosition.y())\n if zoomPixelWidth > 3 and zoomPixelHeight > 3:\n if zoomRect.isValid() and (zoomRect != self.sceneRect()):\n self.zoomStack.append(zoomRect)\n self.updateViewer()\n self.viewChanged.emit()\n event.accept()\n self._isZooming = False\n return\n\n # Finish panning?\n if (self.panButton is not None) and (event.button() == self.panButton):\n if self.panButton == Qt.MouseButton.LeftButton:\n QGraphicsView.mouseReleaseEvent(self, event)\n else:\n # ScrollHandDrag ONLY works with LeftButton, so fake it.\n # Use a bunch of dummy modifiers to notify that event should NOT be handled as usual.\n self.viewport().setCursor(Qt.CursorShape.ArrowCursor)\n dummyModifiers = Qt.KeyboardModifier(Qt.KeyboardModifier.ShiftModifier\n | Qt.KeyboardModifier.ControlModifier\n | Qt.KeyboardModifier.AltModifier\n | Qt.KeyboardModifier.MetaModifier)\n dummyEvent = QMouseEvent(QEvent.Type.MouseButtonRelease, QPointF(event.pos()),\n Qt.MouseButton.LeftButton, event.buttons(), dummyModifiers)\n self.mouseReleaseEvent(dummyEvent)\n self.setDragMode(QGraphicsView.DragMode.NoDrag)\n if len(self.zoomStack) > 0:\n sceneViewport = self.mapToScene(self.viewport().rect()).boundingRect().intersected(self.sceneRect())\n delta = sceneViewport.topLeft() - self._scenePosition\n self.zoomStack[-1].translate(delta)\n self.zoomStack[-1] = self.zoomStack[-1].intersected(self.sceneRect())\n self.viewChanged.emit()\n event.accept()\n self._isPanning = False\n return\n\n scenePos = self.mapToScene(event.pos())\n if event.button() == Qt.MouseButton.LeftButton:\n self.leftMouseButtonReleased.emit(scenePos.x(), scenePos.y())\n elif event.button() == Qt.MouseButton.MiddleButton:\n self.middleMouseButtonReleased.emit(scenePos.x(), scenePos.y())\n elif event.button() == Qt.MouseButton.RightButton:\n self.rightMouseButtonReleased.emit(scenePos.x(), scenePos.y())\n\n QGraphicsView.mouseReleaseEvent(self, event)", "def _on_key_release(self, event):", "def on_release(self, keyname):\n self.keydown = False\n keyname = str(keyname).strip('\\'')\n 
log.info('KEY RELEASE ' + keyname)\n if keyname in self.controls_keyrelease:\n key_handler = self.controls_keyrelease[keyname]()", "def _onMouseButton(e):\n\n mouse_event = MouseButtonEvent(e)\n orca_state.lastInputEvent = mouse_event\n\n # A mouse button event looks like: mouse:button:1p, where the\n # number is the button number and the 'p' is either 'p' or 'r',\n # meaning pressed or released. We only want to stop speech on\n # button presses.\n #\n if mouse_event.pressed:\n speech.stop()", "def mouseReleaseEvent(self, event):\n if GameEngine().running:\n self.parent().stop()\n return\n # end if\n self.parent().parent()._on_start()\n self.parent().parent().on_start()\n game_engine = GameEngine()\n game_engine.start()\n pm.refresh(f=True)\n while game_engine.running:\n if not game_engine.paused:\n game_engine.update_main_game_loop()\n # end if\n # end while\n return QtGui.QPushButton.mouseReleaseEvent(self, event)", "def handle_mouseup(self, button, name):\r\n x = widget.Widget.handle_mouseup(self, button, name)\r\n if not self.mouse_on_me():\r\n return False\r\n if not self.get_visible():\r\n return False\r\n for i in self.widgets:\r\n if i.get_visible():\r\n if i.handle_mouseup(button, name):\r\n return True\r\n return x", "def _release(self, event):\n # Release active tool handle.\n if self._active_handle_idx >= 0:\n if event.button == 3:\n self._remove_vertex(self._active_handle_idx)\n self._draw_polygon()\n self._active_handle_idx = -1\n\n # Complete the polygon.\n elif len(self._xys) > 3 and self._xys[-1] == self._xys[0]:\n self._selection_completed = True\n if self._draw_box and self._box is None:\n self._add_box()\n\n # Place new vertex.\n elif (not self._selection_completed\n and 'move_all' not in self._state\n and 'move_vertex' not in self._state):\n self._xys.insert(-1, (event.xdata, event.ydata))\n\n if self._selection_completed:\n self.onselect(self.verts)", "def on_release(self, event):\n self.current_point = None", "def mousePressEvent(self, ev):\n super(PlotObject, self).mousePressEvent(ev)\n self._downpos = self.mousePos", "def handle_mouse_click(self, button: Button) -> None:\n if button.name == 'BACK':\n self._clear_all_input()\n self.current_page -= 1\n self._focused_button = None\n if self.current_page == len(self.pages) - 2:\n self.current_page -= 1\n elif button.name == 'Show Graph':\n self._plot_graph()\n elif button.name == 'Multiple Regression':\n self._selection.handle_selection(self.current_page, button.name)\n self.current_page += 2\n self._update_ghg_coefs()\n elif button.tag == 'normal' and self.current_page < len(self.pages) - 2:\n self._selection.handle_selection(self.current_page, button.name)\n self.current_page += 1\n elif isinstance(button, InputButton):\n self._focused_button = button", "def check_mouse_release_for_buttons(x, y, button_list):\n for button in button_list:\n if x > button.center_x + button.width / 2:\n continue\n if x < button.center_x - button.width / 2:\n continue\n if y > button.center_y + button.height / 2:\n continue\n if y < button.center_y - button.height / 2:\n continue\n button.on_release()", "def ev_joybuttonup(self, event: tcod.event.JoystickButton) -> T | None:", "def on_mouse_press(self, x, y, button, modifiers):\n\n self.gamestatemanager.peek().on_mouse_press(x, y, button, modifiers)\n\n if self.exclusive:\n self.gamestatemanager.peek().on_mouse_press(x, y, button, modifiers)\n else:\n self.set_exclusive_mouse(True)", "def on_key_release(self, symbol, modifiers):\n self.gamestatemanager.peek().on_key_release(symbol, 
modifiers, self.config_data[\"controls\"])", "def leaveEvent(self, ev):\n if (self.panning):\n # don't immediately change pointer if we're panning\n self.__pointerLeftWidget = True\n else:\n self.setCursor(Qt.ArrowCursor)\n QGraphicsView.leaveEvent(self, ev)\n self.currentKbKey = None", "def handle_mouse(self, x, y):\n pass", "def OnLeftUp(self, event):\r\n\r\n self._on_button = False\r\n \r\n if self._is_dragging:\r\n\r\n if self.HasCapture():\r\n self.ReleaseMouse()\r\n \r\n self._is_dragging = False\r\n if self._drag_image:\r\n self._drag_image.EndDrag()\r\n del self._drag_image\r\n self._drag_image = None\r\n self.GetParent().Refresh()\r\n\r\n evt = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_END_DRAG, self.GetId())\r\n evt.SetSelection(self.GetIdxFromWindow(self._click_tab))\r\n evt.SetOldSelection(evt.GetSelection())\r\n evt.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(evt)\r\n\r\n return\r\n\r\n if self.HasCapture():\r\n self.ReleaseMouse()\r\n \r\n if self._pressed_button:\r\n \r\n # make sure we're still clicking the button\r\n button = self.ButtonHitTest(event.GetX(), event.GetY())\r\n \r\n if button is None:\r\n return\r\n\r\n if button != self._pressed_button:\r\n self._pressed_button = None\r\n return\r\n \r\n self.Refresh()\r\n self.Update()\r\n\r\n if self._pressed_button.cur_state & AUI_BUTTON_STATE_DISABLED == 0:\r\n \r\n evt = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_BUTTON, self.GetId())\r\n evt.SetSelection(self.GetIdxFromWindow(self._click_tab))\r\n evt.SetInt(self._pressed_button.id)\r\n evt.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(evt)\r\n \r\n self._pressed_button = None\r\n \r\n self._click_pt = wx.Point(-1, -1)\r\n self._is_dragging = False\r\n self._click_tab = None", "def release(self, event):\n if not self.ignore(event) and self._eventpress:\n event = self._clean_event(event)\n self._eventrelease = event\n self._release(event)\n self._eventpress = None\n self._eventrelease = None\n self._state.discard('move')\n return True\n return False", "def on_mouse_leave (self, event):\n\n\t\tif not self.clicked:\n\n\t\t\tself.cursor_position = [-1,-1]\n\t\t\tself.redraw_canvas()\n\t\t\tself.hide_tip()#self.timer1 = gobject.timeout_add(2000, self.hide_tip)", "def debounced_key_release(event):\n # print('Debounced release', repr(event.key))\n key_indicator.set_text('')\n fig.canvas.draw()", "def on_key_release(self, key, modifiers):\n player_controller.input_release(key, self.player)" ]
[ "0.84587985", "0.8265383", "0.8027847", "0.7779169", "0.7402796", "0.7354275", "0.7343987", "0.73079485", "0.7235595", "0.7149236", "0.7088489", "0.7049613", "0.70101506", "0.70101506", "0.6996034", "0.6911698", "0.6900314", "0.6800558", "0.6790865", "0.67554575", "0.673073", "0.6726967", "0.67269105", "0.67118204", "0.66901714", "0.66560954", "0.66440475", "0.6620634", "0.66124225", "0.65951455", "0.65931445", "0.65742636", "0.6550351", "0.6540841", "0.65298724", "0.6521909", "0.65116954", "0.65067273", "0.6494714", "0.6472082", "0.64515465", "0.6443433", "0.6429005", "0.64157647", "0.6409425", "0.6378994", "0.63776565", "0.6351753", "0.63449585", "0.63400596", "0.6327742", "0.6304282", "0.6303661", "0.62965316", "0.6290771", "0.6282389", "0.6272602", "0.62614", "0.6256062", "0.6254756", "0.62450033", "0.6236139", "0.62303126", "0.62253684", "0.62219554", "0.62207335", "0.62196213", "0.6185367", "0.6174533", "0.6172327", "0.61604905", "0.6132307", "0.61242676", "0.60749745", "0.60742503", "0.60678643", "0.60147583", "0.5989144", "0.5987362", "0.59703445", "0.59673136", "0.59603065", "0.5959642", "0.59513986", "0.59334505", "0.59172934", "0.5916337", "0.58842784", "0.5872383", "0.5843666", "0.5829919", "0.58225", "0.58122045", "0.58022875", "0.5790955", "0.5774671", "0.5755553", "0.5745622", "0.5702214", "0.5685431" ]
0.723252
9
Compute the NLL loss for this task
def nll(
    self,
    model: nn.Module,
    batch: TupleMiniBatch,
    reduction: str = "mean",
    predict: bool = False,
):
    device = list(model.parameters())[0].device
    batch = batch.to(device)
    inputs = batch.inputs
    # Extract features with the model
    features = model(*inputs)
    nlls = self.nll_on_features(features, batch, reduction)
    if predict:
        predictions = self.predict_on_features(features)
        return (nlls,) + predictions
    else:
        return nlls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_loss(self):", "def nll_loss(predictions, targets, weights, reduction, ignore_index):\n return cpp.nn.nll_loss(predictions, targets, weights, reduction, ignore_index)", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def compute_loss(self, x, gt):\n loss = sum([torch.mean((out - gt)**2) for out in self.forward(x)])\n return loss", "def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def loss(self):\n if not self.run:\n self._run()\n return self.model_loss", "def _compute_loss(self):\n state, action, reward, next_state, done = self.replay_buffer.sample(self.batch_size)\n\n state = torch.FloatTensor(state)\n next_state = torch.FloatTensor(next_state)\n action = torch.LongTensor(action)\n reward = torch.FloatTensor(reward)\n done = torch.FloatTensor(done)\n\n q_values = self.dqn(state)\n q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)\n\n next_q_values = self.target_dqn(next_state)\n next_q_value = next_q_values.max(1)[0]\n target = reward + self.discount_factor * next_q_value * (1 - done)\n\n # loss = F.smooth_l1_loss(q_value, target.detach())\n loss = F.mse_loss(q_value, target.detach())\n\n return loss", "def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def getLoss(self, x_test, t_test):\n x_t = Variable(x_test, requires_grad=False)\n #Feed inputes into neural network\n t_pred = self.model(x_t)\n #Now lets compute out loss\n loss = self.loss_fn(t_pred, t_test)\n return loss", "def loss(self):\n return la.norm(self.resids) / self.normX", "def get_loss(self):\r\n\r\n if F.loss_type==\"cosine\":\r\n self.losscos = r2d*tf.acos(1-tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1))\r\n self.loss = tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1)\r\n elif F.loss_type==\"mse2d\":\r\n xl, yl, zl = tf.split(self.labels, 3, axis=1)\r\n xo, yo, zo = tf.split(self.out, 3, axis=1)\r\n thetal, thetao = tf.asin(-yl), tf.asin(-yo)\r\n phil, phio = tf.atan2(-zl, -xl), tf.atan2(-zo, -xo)\r\n self.lb = tf.concat([thetal, phil], axis=1)\r\n self.ob = tf.concat([thetao, phio], axis=1)\r\n self.loss = tf.scalar_mul(tf.constant(r2d), tf.losses.mean_squared_error(self.lb, self.ob, 2))\r\n elif F.loss_type==\"mse3d\":\r\n self.loss = tf.losses.mean_squared_error(tf.nn.l2_normalize(self.labels, 0), tf.nn.l2_normalize(self.out, 0))", "def _define_loss(self):\n\n cost = []\n unit_cost = []\n for nn in range(len(self.ffnet_out)):\n data_out = 
self.data_out_batch[nn]\n if self.filter_data:\n # this will zero out predictions where there is no data,\n # matching Robs here\n pred = tf.multiply(\n self.networks[self.ffnet_out[nn]].layers[-1].outputs,\n self.data_filter_batch[nn])\n else:\n pred = self.networks[self.ffnet_out[nn]].layers[-1].outputs\n\n nt = tf.cast(tf.shape(pred)[0], tf.float32)\n # define cost function\n if self.noise_dist == 'gaussian':\n with tf.name_scope('gaussian_loss'):\n cost.append(tf.nn.l2_loss(data_out - pred) / nt)\n unit_cost.append(tf.reduce_mean(tf.square(data_out-pred), axis=0))\n\n elif self.noise_dist == 'poisson':\n with tf.name_scope('poisson_loss'):\n\n if self.poisson_unit_norm is not None:\n # normalize based on rate * time (number of spikes)\n cost_norm = tf.multiply(self.poisson_unit_norm[nn], nt)\n else:\n cost_norm = nt\n\n cost.append(-tf.reduce_sum(tf.divide(\n tf.multiply(data_out, tf.log(self._log_min + pred)) - pred,\n cost_norm)))\n\n unit_cost.append(-tf.divide(\n tf.reduce_sum(\n tf.multiply(\n data_out, tf.log(self._log_min + pred)) - pred, axis=0),\n cost_norm))\n\n elif self.noise_dist == 'bernoulli':\n with tf.name_scope('bernoulli_loss'):\n # Check per-cell normalization with cross-entropy\n # cost_norm = tf.maximum(\n # tf.reduce_sum(data_out, axis=0), 1)\n cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred)))\n unit_cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred), axis=0))\n else:\n TypeError('Cost function not supported.')\n\n self.cost = tf.add_n(cost)\n self.unit_cost = unit_cost\n\n # Add regularization penalties\n reg_costs = []\n with tf.name_scope('regularization'):\n for nn in range(self.num_networks):\n reg_costs.append(self.networks[nn].define_regularization_loss())\n self.cost_reg = tf.add_n(reg_costs)\n\n self.cost_penalized = tf.add(self.cost, self.cost_reg)\n\n # save summary of cost\n # with tf.variable_scope('summaries'):\n tf.summary.scalar('cost', self.cost)\n tf.summary.scalar('cost_penalized', self.cost_penalized)\n tf.summary.scalar('reg_pen', self.cost_reg)", "def compute_loss(self):\n def calc_loss(inputs, outputs):\n reconstruction_loss = tf.metrics.binary_crossentropy(\n tf_flat(inputs), tf_flat(outputs))\n reconstruction_loss *= OUT_SIZE * OUT_SIZE\n kl_loss = -0.5 * tf.reduce_sum(1.0 + self.log_sigma - tf.square(\n self.mu) - tf.exp(self.log_sigma), 1)\n return tf.reduce_mean(reconstruction_loss + kl_loss)\n return calc_loss", "def loss_op(self):\n return self.loss", "def compute_loss(self, **kwargs):\n raise NotImplementedError", "def _compute_loss(self, predictions, targets, **params):\n pass", "def dnn_loss_calculation(self, labeled_examples, labels):\n predicted_labels, _ = self.DNN(labeled_examples)\n labeled_loss = self.labeled_loss_function(predicted_labels, labels, order=self.settings.labeled_loss_order)\n labeled_loss *= self.settings.labeled_loss_multiplier\n return labeled_loss", "def _get_loss_weight(self) -> torch.Tensor:\n n_pos: torch.Tensor = 0.0\n n_neg: torch.Tensor = 0.0\n\n for _, ground_truth in self.train_loader:\n n_poss_curr = ground_truth.sum()\n n_pos += n_poss_curr\n n_neg += ground_truth.numel() - n_poss_curr\n\n eps = torch.finfo(n_pos.dtype).eps\n return n_neg / (n_pos + eps)", "def loss_(self, batch):\n raise NotImplementedError", "def loss_total(self):\r\n def loss(y_true, y_pred):\r\n l2 = 1/2*K.sum(K.square(y_true-y_pred))\r\n\r\n return l2\r\n return loss", "def compute_loss(self, inputs):\r\n outputs = 
self.net.compute_outputs(inputs)\r\n loss_grad = self.net.compute_loss_grad(outputs - inputs)\r\n loss = np.sum((inputs - outputs) ** 2, axis=0).mean() / 2.0\r\n return loss, loss_grad", "def loss_function(self, train_head, train_tail, train_relation, train_head_corrupted, train_tail_corrupted):\n\n # train_head = tf.nn.l2_normalize(train_head, 1)\n # train_tail = tf.nn.l2_normalize(train_tail, 1)\n # train_head_corrupted = tf.nn.l2_normalize(train_head_corrupted, 1)\n # train_tail_corrupted = tf.nn.l2_normalize(train_tail_corrupted, 1)\n\n # loss = tf.reduce_mean(\n # tf.maximum(self.dict_paras['margin']\n # + self.distance(tf.add(train_head, train_relation), train_tail)\n # - self.distance(tf.add(train_head_corrupted, train_relation), train_tail_corrupted), 0.))\n\n loss = tf.reduce_mean(self.distance(tf.add(train_head, train_relation), train_tail))\n\n return loss", "def compute_loss(self, *args, **kwargs):\n raise NotImplementedError", "def loss_fn(self, recons, inputs, mu, log_var, **kwargs):\n# kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset\n recons_loss = F.mse_loss(recons, inputs)\n# recons_loss = F.binary_cross_entropy(recons, inputs)\n KLD = torch.mean(-0.5 * torch.sum(1 + log_var - mu**2 - log_var.exp(), dim=1), dim=0)\n loss = recons_loss - KLD\n return loss, recons_loss, KLD", "def compute_loss(self, obs, returns):", "def compute_loss(self, x, label):\n # Forward propagation\n y_hat = self.forward_pass(x)\n return -np.log(y_hat[label])", "def calculate_training_loss(self):\n self.network.train()\n self.training_average_loss = self.calculate_average_loss(self.training_dataloader)", "def nll_loss(\n inputs,\n axis=1,\n ignore_index=None,\n reduction='valid',\n **kwargs\n):\n args = ArgHelper.parse(locals())\n args['reduction'] = reduction.upper()\n op_lib = loss_ops_lib.NLLLoss\n if context.executing_eagerly():\n return op_lib \\\n .instantiate(\n axis=axis,\n reduction=args['reduction'],\n ignore_index=ignore_index,\n ).apply(inputs)\n else:\n return op_lib.blend(**args)", "def calc_loss(self, x: np.ndarray, y: np.ndarray) -> float:\n return self.descent.calc_loss(x, y)", "def loss(self):\n return self._loss", "def loss(self) -> KernelLoss:\n return self._loss", "def loss(self, y: torch.Tensor, state: AlgorithmState) -> torch.Tensor:\n\n raise NotImplementedError()", "def loss_fn(self, lbl, y):\n\n binlbl = self._to_device(lbl[:,0]>.5)\n # center = self._to_device(lbl[:,3]) \n offset = 5. * self._to_device(lbl[:,1:]) \n\n loss = self.criterion(y[:,:2], offset) \n loss2 = self.criterion2(y[:,2], binlbl)\n\n # loss3 = self.criterion(y[:,3], center)\n\n loss = loss + loss2\n return loss", "def build_loss(self):\n import tensorflow as tf\n\n y_1d = [tf.reduce_sum(tf.multiply(self.variables[\"y\"][i], self.variables[\"y_action\"][i]), axis=1) for i in range(len(self.variables[\"y\"]))]\n loss = np.sum([tf.nn.l2_loss(y_1d[i] - self.variables[\"y_true\"]) for i in range(len(y_1d))])\n\n l1_reg = 0\n l2_reg = 0\n\n keys = sorted(self.variables.keys())\n keys = [key for key in keys if critere_keys(key) and \"W\" in key]\n for key in keys:\n l1_reg += tf.reduce_sum(tf.abs(self.variables[key]))\n l2_reg += tf.nn.l2_loss(self.variables[key])\n\n self.loss = loss + self.alpha_reg * l1_reg + self.beta_reg * l2_reg\n\n self.train_step = tf.train.RMSPropOptimizer(self.decay_learning_rate,\n decay=0.99, momentum=0., centered=True).minimize(self.loss, global_step=self.global_step)", "def _create_loss_op(self):\n # 1.) 
The reconstruction loss, which forces the NN towards reconstructing more accurately the\n # given input. This function is configurable, but usually it is the Bernoulli negative log-likelihood.\n if self.cost_function == 'abs':\n reconstr_loss = tf.reduce_sum(tf.abs(self.x_decoded - self.x_in), 1)\n elif self.cost_function in ('mse', 'l2', 'square'):\n reconstr_loss = tf.reduce_sum(tf.squared_difference(self.x_in, self.x_decoded), 1)\n elif self.cost_function in ('xentropy', 'log'):\n reconstr_loss = \\\n -tf.reduce_sum(self.x_in * tf.log(1e-10 + self.x_decoded)\n + (1 - self.x_in) * tf.log(1e-10 + 1 - self.x_decoded),\n 1)\n else:\n raise ValueError(self.cost_function, \"Unknown cost function name!\")\n\n # 2.) The latent loss, which is defined as the Kullback Leibler divergence\n ## between the distribution in latent space induced by the encoder on\n # the data and some prior. This acts as a kind of regularizer.\n # This can be interpreted as the number of \"nats\" required\n # for transmitting the the latent space distribution given\n # the prior.\n latent_loss = -0.5 * tf.reduce_sum(1. + self.z_log_sigma_sq\n - tf.square(self.z_mean)\n - tf.exp(self.z_log_sigma_sq), 1)\n\n self.loss_op = tf.reduce_mean(reconstr_loss + latent_loss) # average over batch\n tf.add_to_collection(\"losses\", self.loss_op)\n\n if self.learning_rate is not None:\n global_step = tf.train.get_or_create_global_step()\n self.train_op = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(\n self.loss_op,\n global_step=global_step,\n var_list=tf.get_collection(self.training_scope) if self.training_scope is not None else None)\n\n tf.add_to_collection(\"train_ops\", self.train_op)\n tf_logging.info(\"Added AdamOptimizer with learning rate: %.8f\" % self.learning_rate)\n\n tf.summary.scalar(\"latent_loss\", tf.reduce_mean(latent_loss))\n tf.summary.scalar(\"reconstruction_loss\", tf.reduce_mean(reconstr_loss))\n tf.summary.scalar(\"vae_loss\", self.loss_op)", "def build_loss(self):\n if self.mode != \"encode\":\n total_loss = tf.losses.get_total_loss()\n tf.summary.scalar(\"losses/total\", total_loss)\n\n self.total_loss = total_loss", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE question 4 ***\"\n return nn.SoftmaxLoss(self.run(xs), y)", "def _get_ner_loss(self):\n # per example loss\n no_entity_id = self.config[\"model\"][\"ner\"][\"no_entity_id\"]\n logits_shape = tf.shape(self.ner_logits_train)\n labels_shape = logits_shape[:3]\n labels = get_dense_labels_from_indices(indices=self.ner_labels_ph, shape=labels_shape, no_label_id=no_entity_id)\n per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels, logits=self.ner_logits_train\n ) # [batch_size, num_tokens, num_tokens]\n\n # mask\n maxlen = logits_shape[1]\n span_mask = upper_triangular(maxlen, dtype=tf.float32)\n sequence_mask = tf.sequence_mask(self.num_tokens_ph, dtype=tf.float32) # [batch_size, num_tokens]\n mask = span_mask[None, :, :] * sequence_mask[:, None, :] * sequence_mask[:, :, None] # [batch_size, num_tokens, num_tokens]\n\n masked_per_example_loss = per_example_loss * mask\n total_loss = tf.reduce_sum(masked_per_example_loss)\n num_valid_spans = tf.cast(tf.reduce_sum(mask), tf.float32)\n loss = total_loss / num_valid_spans\n\n loss *= self.config[\"model\"][\"ner\"][\"loss_coef\"]\n return loss", "def loss_fn(self, targets, outputs, model):", "def get_loss(self):\n return self.loss / self.cnt", "def calculate_validation_loss(self):\n self.network.train()\n self.validation_average_loss = 
self.calculate_average_loss(self.validation_dataloader)", "def build_loss(self):\n\n opt = tf.train.AdamOptimizer(self.learning_rate)\n mse = tf.losses.mean_squared_error(self.label[-1], self.outputs[-1])\n loss = tf.losses.get_total_loss()\n\n return mse, loss", "def setup_loss(self):\n with vs.variable_scope(\"loss\"):\n self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.label_placeholder, logits=self.label_predictions))", "def _compute_loss(self, inputs, logits, targets, idxes):\n clf_loss = F.cross_entropy(logits, targets)\n\n if self._task == 0:\n distil_loss = torch.zeros(1, device=self._device)\n else:\n if self._finetuning:\n # We only do distillation on current task during the distillation\n # phase:\n last_index = len(self._task_idxes)\n else:\n last_index = len(self._task_idxes) - 1\n\n distil_loss = 0.\n #with torch.no_grad():\n previous_logits = self._old_model(inputs)\n\n for i in range(last_index):\n task_idxes = self._task_idxes[i]\n\n distil_loss += F.binary_cross_entropy(\n F.softmax(logits[..., task_idxes] / self._temperature, dim=1),\n F.softmax(previous_logits[..., task_idxes] / self._temperature, dim=1)\n )\n\n return clf_loss, distil_loss", "def loss_fun(model: GPModel, params: dict) -> float:\n py = model.module.call(params, train_ds['index_points'])\n return -py.log_prob(train_ds['y'])", "def nll_loss(\n input,\n target,\n weight=None,\n size_average=None,\n ignore_index=None,\n reduce=None,\n reduction='mean',\n):\n if size_average is not None or reduce is not None:\n reduction = _reduction.legacy_get_string(size_average, reduce)\n else:\n reduction = reduction\n return FunctionLib.apply(\n 'NLLLoss', input.device, [input, target],\n axis=1, ignore_index=ignore_index, reduction=reduction.upper())", "def do_loss(logits, labels):\n return tf.reduce_sum(tf.square(logits - labels))", "def loss_fn(model):\n with flax.deprecated.nn.stateful() as state:\n with flax.deprecated.nn.stochastic(dropout_rng):\n logits = model(example, train=True)\n loss, weight_sum = compute_weighted_cross_entropy(logits, targets)\n mean_loss = loss / weight_sum\n return mean_loss, (logits, state)", "def eval_loss(self, input_dataset, target_dataset):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\t\tprediction = self.network.forward(input_dataset)\n\t\tloss = self._loss_layer.forward(prediction, target_dataset)\n\t\t\n\t\treturn loss\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def _calc_loss(self, fvs, labels, w, b):\n\n loss = 0.5 * self.lda * (np.linalg.norm(w) ** 2)\n tmp = sum(map(lambda x, y: (x - y) ** 2, fvs.dot(w) + b, labels))\n loss += tmp / fvs.shape[0]\n\n return loss", "def _create_loss(self):\n with tf.device('/cpu:0'):\n with tf.name_scope('loss'):\n self.loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n labels=self.labels_placeholder, \n logits=self.logits, name='loss'))", "def loss(self, X, Y, lmd):\n P, _ = self.forward(X)\n loss = np.mean(-np.log(np.einsum('ij,ji->i', Y.T, P)))\n\n reg = 0 # Regularization term\n for w in self.W:\n reg += np.sum(np.square(w))\n\n reg *= lmd\n\n cost = loss + reg\n\n return cost", "def loss_fn(params):\n logits = models.ProgramTransformer(config).apply(\n {'params': params},\n inputs,\n outputs,\n programs,\n 
rngs={'dropout': train_rng})\n loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)\n mean_loss = loss / weight_sum\n return mean_loss, logits", "def loss_total(self, mask):\n\n def loss(y_true, y_pred):\n\n # Compute predicted image with non-hole pixels set to ground truth\n y_comp = mask * y_true + (1-mask) * y_pred\n\n # Compute the vgg features. \n if self.vgg_device:\n with tf.device(self.vgg_device):\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n else:\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n \n # Compute loss components\n l1 = self.loss_valid(mask, y_true, y_pred)\n l2 = self.loss_hole(mask, y_true, y_pred)\n l3 = self.loss_perceptual(vgg_out, vgg_gt, vgg_comp)\n l4 = self.loss_tv(mask, y_comp)\n l5 = - 0.5 * K.sum(1 + self.z_log_var -self.cl - K.square(self.z_mean)/K.exp(self.cl) - K.exp(self.z_log_var)/K.exp(self.cl))\n # Return loss function\n return l1 + 6*l2 + 0.05*l3 + 0.1*l4 +l5 \n return loss", "def genLoss(self, *data):\r\n _, (x_unlab, _) = data\r\n z = self.getInputNoise(self.hypers['ul_BS'])\r\n fake_logits = self.D(self.G(z))\r\n g_losses = -1*logOneMinusSoftmax(fake_logits)[:,self.D.numClasses-1]\r\n return torch.mean(g_losses)", "def get_loss(self):\n raise NotImplementedError", "def _compute_loss(self, batch: Dict[str, torch.Tensor]) -> torch.Tensor:\n\n feat_static_cat = batch[\"feat_static_cat\"]\n feat_static_real = batch[\"feat_static_real\"]\n past_time_feat = batch[\"past_time_feat\"]\n past_target = batch[\"past_target\"]\n future_time_feat = batch[\"future_time_feat\"]\n future_target = batch[\"future_target\"]\n past_observed_values = batch[\"past_observed_values\"]\n\n picnn = self.model.picnn\n\n _, scale, hidden_state, _, _ = self.model.unroll_lagged_rnn(\n feat_static_cat,\n feat_static_real,\n past_time_feat,\n past_target,\n past_observed_values,\n future_time_feat,\n future_target,\n )\n\n hidden_state = hidden_state[:, : self.model.context_length]\n\n distr = self.model.output_distribution(picnn, hidden_state, scale)\n\n context_target = past_target[:, -self.model.context_length + 1 :]\n target = torch.cat(\n (context_target, future_target),\n dim=1,\n )\n\n loss_values = self.loss(distr, target)\n\n return loss_values.mean()", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(xs)\n return nn.SoftmaxLoss(predictedY, y)\n # return nn.SquareLoss(predictedY, y)", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE question 3 ***\"\n return nn.SoftmaxLoss(self.run(x), y)", "def calc_loss(self, guess: List[float], answer: List[float]) -> float:\n #print(\"Guess: %s Answer: %s\" % (guess, answer))\n return self.tested_network.loss_function.func(guess, answer)", "def _get_loss(self):\n raise NotImplementedError", "def calculate_loss(self, X, y):\n probs = self.predict(X)\n\n num_examples = X.shape[0]\n\n sub = np.subtract(probs, y)\n abs_sum = np.abs(sub)\n sm = np.sum(abs_sum)\n loss = 1 - sm / num_examples\n print(\"Current loss: [ \" + str(\"{:6.5f}\").format(loss) + \" ]\")\n return loss", "def compute_loss(self, X, y):\r\n # INSERT YOUR CODE HERE\r\n #raise Exception('Function not yet implemented!')\r\n\r\n # Computing the loss using the below formula\r\n # Loss = -(1/m)*sum( (y_i)*log(σ(wTx_i)) + (1-y_i)*log(1 - σ(wTx_i)))\r\n # m = number of examples and i for ith example\r\n\r\n loss = 0\r\n X = np.append(X, np.array([[1]]*X.shape[0]), axis=1)\r\n # for idx,example in enumerate(X):\r\n # loss = loss + y[idx] 
* np.log(self.sigmoid(np.dot(example, self.w))) + (1 - y[idx]) * np.log(1 - self.sigmoid(np.dot(example, self.w)))\r\n # loss = -loss/ X.shape[0]\r\n\r\n loss = -np.mean(y * np.log(self.sigmoid(np.dot(X, self.w))) + (1 - y) * np.log(1 - self.sigmoid(np.dot(X, self.w))))\r\n return loss", "def loss(\n self,\n model_in: torch.Tensor,\n target: Optional[torch.Tensor] = None,\n idx=None,\n next_obs=None,\n eval=False,\n ) -> torch.Tensor:\n loss = self._vaml_loss(model_in, target, idx=idx, next_obs=next_obs, eval=False)\n if self.add_mse:\n loss += self._mse_loss(model_in, target).mean(-1, keepdim=True)\n return loss.mean()", "def loss_fn(gr_truth, pred):\n return 100 * dice_loss(pred, gr_truth) + softmax_weighted_loss(pred, gr_truth)", "def get_cost(self):\n\n\t\tx = self.symbolic_input\n\t\ty = self.get_reconstructed_input()\n\n\t\tnegative_log_loss = -theano.tensor.sum(x*theano.tensor.log(y) +\n\t\t\t(1-x)*theano.tensor.log(1-y), axis=1)\n\n\t\tmean_loss = theano.tensor.mean(negative_log_loss)\n\n\t\treturn mean_loss", "def compute_loss(self, x, y):\n\n self.batch_size = x.shape[0]\n self.x = x\n self.y = y\n self.soft = self.softmax(x) + 10**(-11)\n out = np.zeros(self.batch_size)\n for i in range(self.batch_size):\n out[i] = -(y[i] @ np.log(self.soft[i]))\n\n return out", "def build_loss(self, n_loss, t_loss):\n loss = tf.add(n_loss, t_loss)\n return loss", "def _compute_loss(self, state, action, reward, next_state, done):\n state = torch.FloatTensor(state)\n q_values = self.dqn(state)\n q_value = q_values[action]\n\n next_state = torch.FloatTensor(next_state)\n next_q_values = self.dqn(next_state)\n next_q_value = next_q_values.max()\n\n if done:\n target = reward\n else:\n target = reward + self.discount_factor * next_q_value\n\n loss = (q_value - target).pow(2).mean()\n\n return loss", "def get_loss(self, Loss, results, inputs, device):\n return", "def calc_loss(self, outputs, labels):\n information_loss = self.bottleneck.buffer_capacity.mean() # Taking the mean is equivalent of scaling with 1/K\n cross_entropy = F.cross_entropy(outputs, target=labels)\n total = cross_entropy + self.beta * information_loss\n self.ce_loss.append(cross_entropy.cpu().detach().numpy())\n self.info_loss.append(information_loss.cpu().detach().numpy())\n self.total_loss.append(total.cpu().detach().numpy())\n return total", "def build_nt_loss(self, n_logits, n_target):\n n_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=n_logits, labels=n_target)\n n_loss = tf.reduce_mean(n_loss)\n return n_loss", "def calculate_loss(model, t, logits, labels):\n model_para = model.get_paramaters_list_reshape()\n myTF.calculate_para_dependence_loss(model_para,t)\n\n myTF.calculate_cross_entropy_loss(logits, labels)\n\n return tf.add_n(tf.get_collection('losses'), name='loss_total')", "def get_loss(self, xs, y):\n return nn.SoftmaxLoss(self.run(xs), y)", "def loss_fn(y_true,y_pred): \n loss = tf.nn.softmax_cross_entropy_with_logits_v2(y_true,\n y_pred,\n axis=-1,\n )\n loss = tf.reduce_mean(loss,name=\"loss\")\n return loss", "def loss_fn(model):\n with flax.nn.stateful(state) as new_state:\n with flax.nn.stochastic(prng_key):\n logits = model(batch['image'])\n loss = cross_entropy_loss(logits, batch['label'])\n # TODO(britefury): check if applying L2 regularization to weights but\n # *not* biases improves results\n weight_penalty_params = jax.tree_leaves(model.params)\n weight_l2 = sum([jnp.sum(x ** 2)\n for x in weight_penalty_params\n if x.ndim > 1])\n weight_penalty = l2_reg * 0.5 * weight_l2\n loss = loss + 
weight_penalty\n return loss, (new_state, logits)", "def loss(self, labels, input_data):\n\n pred, out = self.inference(input_data)\n loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels, out), name=\"loss\") + \\\n tf.losses.get_regularization_loss()\n return loss, pred", "def detector_loss(self, input, target, mask=None, loss_type=\"softmax\"):\n if loss_type == \"l2\":\n loss_func = nn.MSELoss(reduction=\"mean\")\n loss = loss_func(input, target)\n elif loss_type == \"softmax\":\n loss_func_BCE = nn.BCELoss(reduction='none').cuda()\n loss = loss_func_BCE(nn.functional.softmax(input, dim=1), target)\n loss = (loss.sum(dim=1) * mask).sum()\n loss = loss / (mask.sum() + 1e-10)\n return loss", "def calculate_total_loss(self, train_x, train_y):\n return np.sum([self.calculate_error(x, y)\n for x, y in zip(train_x, train_y)])", "def loss(self):\n return self._get(\"loss\")", "def computeLoss(self):\n return sum(np.arccosh(-minkowskiArrayDot(self.examples, self.centroid)) ** 2)[0] / np.shape(self.examples)[0]", "def compute_loss(self, sample):\n observations_batch, actions_batch, return_batch, masks_batch, \\\n old_action_log_probs_batch, adv_targ = sample\n\n assert old_action_log_probs_batch.shape == (self.mini_batch_size, 1)\n assert adv_targ.shape == (self.mini_batch_size, 1)\n assert return_batch.shape == (self.mini_batch_size, 1)\n\n values, action_log_probs, dist_entropy = self.evaluate_actions(\n observations_batch, actions_batch)\n\n assert values.shape == (self.mini_batch_size, 1)\n assert action_log_probs.shape == (self.mini_batch_size, 1)\n assert values.requires_grad\n assert action_log_probs.requires_grad\n assert dist_entropy.requires_grad\n\n # [TODO] Implement policy loss\n ratio = torch.exp(action_log_probs - old_action_log_probs_batch)\n surr1 = ratio * adv_targ\n surr2 = torch.clamp(ratio, 1.0 - self.clip_param, 1.0 + self.clip_param) * adv_targ\n policy_loss = -torch.min(surr1, surr2).mean()\n\n # [TODO] Implement value loss\n value_loss = F.mse_loss(return_batch, values)\n\n # This is the total loss\n loss = policy_loss + self.config.value_loss_weight * value_loss - self.config.entropy_loss_weight * dist_entropy\n\n return loss, policy_loss, value_loss, dist_entropy", "def compute_loss(y, tx, w):\n # ***************************************************\n # INSERT YOUR CODE HERE\n # TODO: compute loss by MSE / MAE\n # ***************************************************\n \n # vector e\n e = compute_e(y, tx, w)\n N = compute_N(e)\n L_MSE = np.dot(np.matrix.transpose(e), e)\n L_MSE = L_MSE / (2 * N)\n \n return L_MSE", "def _loss(self):\n\n cross_entropy = tf.reduce_mean(-tf.log(self.probability + epsilon) * self.y)\n self.loss = cross_entropy\n\n self.accuracy = tf.reduce_mean(\n tf.cast(tf.equal(tf.argmax(self.y, 1), self.prediction), tf.float32))", "def loss_fn(self, pred: Tensor, true: Tensor) -> Tensor:\n pass", "def loss_function(self, x, fwd_rtn):\n px_zs = fwd_rtn[\"px_zs\"]\n qz_x = fwd_rtn[\"qz_x\"]\n px_zss = fwd_rtn[\"px_zss\"]\n qz_xs = fwd_rtn[\"qz_xs\"]\n\n kl = self.calc_kl(qz_x)\n kl_separate = self.calc_kl(qz_xs)\n ll = self.calc_ll(x, px_zs)\n ll_separate = self.calc_ll(x, px_zss)\n\n total = kl + kl_separate - ll - ll_separate\n losses = {\"loss\": total, \"kl\": kl, \"ll\": ll, \"ll_separate\": ll_separate, \"kl_separate\": kl_separate}\n\n return losses", "def lfads_training_loss(params, lfads_hps, key, x_bxt, kl_scale, keep_rate):\n losses = lfads_losses(params, lfads_hps, key, x_bxt, kl_scale, keep_rate)\n return losses['total']", "def 
calculate_loss(self, pred, gold, smoothing=False):\n gold = gold.contiguous().view(-1)\n if smoothing:\n epsilon = 0.1\n n_class = pred.size(1)\n one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)\n one_hot = one_hot * (1 - epsilon) + \\\n (1 - one_hot) * epsilon / (n_class - 1)\n\n log_prb = F.log_softmax(pred, dim=1)\n # create non-padding mask with torch.ne()\n non_pad_mask = gold.ne(self.constants.PAD)\n loss = -(one_hot * log_prb).sum(dim=1)\n # losses are averaged later\n loss = loss.masked_select(non_pad_mask).sum()\n else:\n loss = F.cross_entropy(\n pred, gold, ignore_index=self.constants.PAD, reduction='sum')\n return loss", "def get_loss(self, inputs, outputs, add_summary=True):\n cfg = self.cfg()\n torch.autograd.set_detect_anomaly(True)\n # g_loss = tf.zeros(dtype=tf.float32, shape=[])\n g_loss = self.add_proj_loss(inputs, outputs, cfg.proj_weight, add_summary)\n r_loss = self.regularization_loss(cfg)\n# print(g_loss, r_loss)\n g_loss += r_loss\n # if cfg.proj_weight:\n # g_loss += self.add_proj_loss(inputs, outputs, cfg.proj_weight, add_summary)\n\n # if cfg.drc_weight:\n # g_loss += add_drc_loss(cfg, inputs, outputs, cfg.drc_weight, add_summary)\n #\n # if cfg.pc_rgb:\n # g_loss += add_proj_rgb_loss(cfg, inputs, outputs, cfg.proj_rgb_weight, add_summary, self._sigma_rel)\n #\n # if cfg.proj_depth_weight:\n # g_loss += add_proj_depth_loss(cfg, inputs, outputs, cfg.proj_depth_weight, self._sigma_rel, add_summary)\n #\n # if add_summary:\n # tf.contrib.summary.scalar(\"losses/total_task_loss\", g_loss)\n\n return g_loss", "def loss(self, logits, labels):\r\n return tf.reduce_mean(tf.keras.losses.binary_crossentropy(labels,logits))", "def model_loss(inp, fake, real_label, fake_label):\n \n \n Dreal,realcls,R1 = gradpen(inp)\n [Dfake,fakecls] = D(fake)\n # 1. Adversarial loss\n \n glabel = tf.ones_like(Dfake)#tf.random.uniform((Dfake.shape), 1-LN, 1)\n dlabelr = tf.ones_like(Dreal)#tf.random.uniform((Dreal.shape), 1-LN, 1)\n dlabelf = tf.zeros_like(Dfake)#tf.random.uniform((Dfake.shape), 0, LN)\n \n \n \n # D has no sigmoid activation: \"from_logits=True\"\n real_loss = tf.keras.losses.binary_crossentropy(\n dlabelr, Dreal, from_logits=True)\n real_loss = tf.reduce_mean(real_loss)\n \n fake_loss = tf.keras.losses.binary_crossentropy(\n dlabelf, Dfake, from_logits=True)\n fake_loss = tf.reduce_mean(fake_loss)\n \n Dadv = 0.5*(real_loss+fake_loss)\n \n Gadv = tf.keras.losses.binary_crossentropy(\n glabel, Dfake, from_logits=True)\n Gadv = tf.reduce_mean(Gadv)\n \n # 2. Classification loss\n \n Dcls = tf.keras.losses.binary_crossentropy(real_label, realcls, from_logits=True)\n Dcls = tf.reduce_mean(Dcls)\n \n Gcls = tf.keras.losses.binary_crossentropy(fake_label, fakecls, from_logits=True)\n Gcls = tf.reduce_mean(Gcls)\n \n # 3. 
Total loss\n \n Dloss = Dadv + (GAMMA/2)*R1 + LAMBDA_CLS*Dcls\n \n Gloss = Gadv + LAMBDA_CLS*Gcls\n \n return (Dloss, Dadv, Dcls, R1), (Gloss, Gadv, Gcls)", "def calc_loss(predictions, labels):\n return np.mean(np.square(predictions - labels))", "def nt_transfer_loss(self, student_net_params, masks, teacher_net_params, x, density_level): \n\n masked_student_net_params = get_sparse_params_filtered_by_masks(student_net_params, masks)\n\n # split inputs into two collections, x1 and x2.\n x1 = x[:int(len(x)/2)]\n x2 = x[int(len(x)/2):]\n \n # student network prediction\n student_prediction = self.apply_fn(masked_student_net_params, x) \n \n # teacher network prediction\n teacher_prediction = self.apply_fn(teacher_net_params, x)\n\n # student network's NTK evaluated on x1 and x2\n student_ntk_mat = self.emp_ntk_fn(x1, x2, masked_student_net_params) \n\n # teacher network's NTK evaluated on x1 and x2\n teacher_ntk_mat = self.emp_ntk_fn(x1, x2, teacher_net_params) \n\n # compute kernel, target, and paramter l2 loss\n ker_dist, target_dist, param_squared_norm = self.kernel_dist_target_dist_l2_loss(student_ntk_mat, student_prediction, teacher_ntk_mat, teacher_prediction, masked_student_net_params)\n\n # weight these losses to get the transfer loss\n transfer_loss = self.LAMBDA_KER_DIST * ker_dist + target_dist + (self.LAMBDA_L2_REG / density_level) * param_squared_norm \n\n return transfer_loss", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n # We are gonna store everythin in a dictionnary hidden\n hidden = {}\n hidden['h0'] = X.reshape(X.shape[0], np.prod(X.shape[1:]))\n\n for i in range(self.L):\n idx = i + 1\n # Naming of the variable\n w = self.params['W' + str(idx)]\n b = self.params['b' + str(idx)]\n h = hidden['h' + str(idx - 1)]\n\n # Computing of the forward pass.\n # Special case of the last layer (output)\n if idx == self.L:\n h, cache_h = affine_forward(h, w, b)\n hidden['h' + str(idx)] = h\n hidden['cache_h' + str(idx)] = cache_h\n\n # For all other layers\n else:\n h, cache_h = affine_relu_forward(h, w, b)\n hidden['h' + str(idx)] = h\n hidden['cache_h' + str(idx)] = cache_h\n\n scores = hidden['h' + str(self.L)]\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n # Computing of the loss\n data_loss, dscores = softmax_loss(scores, y)\n reg_loss = 0\n for w in [self.params[f] for f in self.params.keys() if f[0] == 'W']:\n reg_loss += 0.5 * self.reg * np.sum(w * w)\n\n loss = data_loss + reg_loss\n\n # Backward pass\n\n hidden['dh' + str(self.L)] = dscores\n for i in range(self.L)[::-1]:\n idx = i + 1\n dh = hidden['dh' + str(idx)]\n h_cache = hidden['cache_h' + str(idx)]\n if idx == self.L:\n dh, dw, db = affine_backward(dh, h_cache)\n hidden['dh' + str(idx - 1)] = dh\n hidden['dW' + str(idx)] = dw\n hidden['db' + str(idx)] = db\n else:\n dh, dw, db = affine_relu_backward(dh, h_cache)\n hidden['dh' + str(idx - 1)] = dh\n hidden['dW' + str(idx)] = dw\n hidden['db' + str(idx)] = db\n\n # w gradients where we add the regulariation term\n list_dw = {key[1:]: val + self.reg * self.params[key[1:]]\n for key, val in hidden.iteritems() if key[:2] == 'dW'}\n # Paramerters b\n list_db = {key[1:]: val for key, val in hidden.iteritems() if key[:2] == 'db'}\n # Parameters gamma\n list_dgamma = {key[1:]: val for key, val in hidden.iteritems() if key[:6] == 'dgamma'}\n # Paramters beta\n list_dbeta = {key[1:]: val for key, val in hidden.iteritems() if key[:5] == 'dbeta'}\n grads = {}\n grads.update(list_dw)\n 
grads.update(list_db)\n grads.update(list_dgamma)\n grads.update(list_dbeta)\n return loss, grads", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n y_pred = self.run(xs)\n return nn.SoftmaxLoss(y_pred,y)", "def loss(self, **kwargs):\n pass", "def kl_loss(self):\n return sum([p.kl_loss() for p in self.parameters])", "def _compute_loss(self, decoder_output, labels):\n with tf.name_scope(\"compute_loss\"):\n language_logit = decoder_output.logits[0]\n attention_logit = decoder_output.logits[1]\n batch_size = self.params['dataset']['batch_size']\n\n language_losses = self._cross_entropy_sequence_loss(\n logits=language_logit,\n targets=tf.transpose(labels[\"label\"], [1, 0]),\n sequence_length=labels[\"length\"])\n attention_losses = self._cross_entropy_sequence_loss(\n logits=attention_logit,\n targets=tf.transpose(labels[\"label\"], [1, 0]),\n sequence_length=labels[\"length\"])\n\n language_loss = tf.reduce_sum(language_losses) / batch_size\n attention_loss = tf.reduce_sum(attention_losses) / batch_size\n loss = language_loss + attention_loss\n\n return loss", "def compute_loss(self, features, mode, params, precomputed):\n raise NotImplementedError(\"Model does not implement loss.\")", "def compute_batch_loss(self, batch_data):\n loss = 0\n for data in batch_data:\n x, y = data\n x = x.view(-1,x.shape[0],x.shape[1])\n y = y.view(-1,y.shape[0], y.shape[1])\n loss += self.compute_loss(x.to(self.device), y.to(self.device))\n \n return loss" ]
[ "0.7523511", "0.7353931", "0.73111176", "0.73111176", "0.71701425", "0.7164954", "0.713618", "0.71097046", "0.70807564", "0.7053136", "0.70448524", "0.70416677", "0.70341206", "0.70327586", "0.6955481", "0.6945738", "0.69216895", "0.69183487", "0.6891246", "0.6886024", "0.688434", "0.6881664", "0.6876219", "0.6858471", "0.68350726", "0.6813097", "0.68124986", "0.68096656", "0.6800859", "0.6795269", "0.67898387", "0.67806906", "0.6779554", "0.6779112", "0.67530334", "0.6752758", "0.6749906", "0.6743668", "0.6741382", "0.6735357", "0.6734576", "0.67322016", "0.6730113", "0.6713078", "0.67046934", "0.6700777", "0.66945726", "0.66910625", "0.6672483", "0.6668599", "0.66594666", "0.66517204", "0.66438", "0.6643468", "0.66296774", "0.6627861", "0.6627616", "0.66250825", "0.6614248", "0.66127896", "0.66033065", "0.6597789", "0.65955013", "0.65857154", "0.6567849", "0.6567156", "0.65499485", "0.6545295", "0.6538666", "0.6537339", "0.6530543", "0.65295684", "0.65293866", "0.65171736", "0.6504234", "0.6501697", "0.64984196", "0.6497542", "0.64962864", "0.64905816", "0.6487976", "0.64782137", "0.64722544", "0.64711916", "0.6464149", "0.6456039", "0.6452499", "0.64518714", "0.6447759", "0.6446523", "0.6446415", "0.64442885", "0.64437485", "0.6440213", "0.6439455", "0.6433044", "0.6429286", "0.64247614", "0.64219946", "0.6418348", "0.64127094" ]
0.0
-1
Compute the logits for this task
def logits(self, model, batch):
    device = list(model.parameters())[0].device
    batch = batch.to(device)
    inputs = batch.inputs
    # Extract features with the model
    features = model(*inputs)
    logits = self.logits_on_features(features, batch)
    return logits
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_logits(self):\n # [num train labels, num classes] where each row is a one-hot-encoded label.\n one_hot_train_labels = tf.one_hot(self.data.train_labels, self.way)\n\n # Undocumented in the paper, but *very important*: *only* the support set\n # embeddings is L2-normalized, which means that the distance is not exactly\n # a cosine distance. For comparison we also allow for the actual cosine\n # distance to be computed, which is controlled with the\n # `exact_cosine_distance` instance attribute.\n train_embeddings = tf.nn.l2_normalize(\n self.train_embeddings, 1, epsilon=1e-3)\n test_embeddings = self.test_embeddings\n if self.exact_cosine_distance:\n test_embeddings = tf.nn.l2_normalize(test_embeddings, 1, epsilon=1e-3)\n # [num_test_images, num_train_images]\n similarities = tf.matmul(\n test_embeddings, train_embeddings, transpose_b=True)\n attention = tf.nn.softmax(similarities)\n\n # [num_test_images, way]\n probs = tf.matmul(attention, one_hot_train_labels)\n self.test_logits = tf.log(probs)\n return self.test_logits", "def compute_edge_logits(self):", "def compute_logits(self):\n # [num test images, 1, embedding size].\n test_embeddings = tf.expand_dims(self.test_embeddings, 1)\n\n # [1, num_clases, embedding_size].\n prototypes = tf.expand_dims(self.prototypes, 0)\n\n # Squared euclidean distances between each test embedding / prototype pair.\n distances = tf.reduce_sum(tf.square(test_embeddings - prototypes), 2)\n self.test_logits = -distances\n return self.test_logits", "def __call__(self,logits):\n \n #sample from Gumbel(0, 1)\n uniform = self._srng.uniform(logits.shape,low=0,high=1)\n gumbel = -T.log(-T.log(uniform + self.eps) + self.eps)\n \n #draw a sample from the Gumbel-Softmax distribution\n return T.nnet.softmax((logits + gumbel) / self.temperature)", "def logits(self):\n return np.array([m['actor'] for m in self.model_outs], dtype=np.float32)", "def get_logits(self, hidden_states: torch.FloatTensor,\n temperature: float = 1.0):\n return self.logits(hidden_states) / temperature", "def _get_logit(self, input, target_idx, noise_idx):\n\n target_logit, noise_logit = self.get_score(input, target_idx, noise_idx)\n\n target_logit = target_logit.sub(self.norm_term)\n noise_logit = noise_logit.sub(self.norm_term)\n return target_logit, noise_logit", "def logits_placeholder(self):", "def _compute_outputs(self, *args, **kwargs):\n pass\n # self.outputs = self.model(input_ids=self.input_ids, masked_lm_labels=self.input_ids)\n # self.logits = self.outputs[0][0]\n # self.probs = torch.softmax(self.logits, 1)", "def get_logits(self, logits):\n if not tf.is_tensor(logits):\n logits = tf.convert_to_tensor(value=logits)\n return logits / self._temperature", "def calculate_log_perplexity(self, output, flat_labels): #completed, expensive, should be compiled\n return -np.sum(np.log2(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])", "def log(self): # just use base?\n return Factor().__build( VarSet(self.v) , np.log(self.t) )", "def _compute_loss(self, inputs, logits, targets, idxes):\n clf_loss = F.cross_entropy(logits, targets)\n\n if self._task == 0:\n distil_loss = torch.zeros(1, device=self._device)\n else:\n if self._finetuning:\n # We only do distillation on current task during the distillation\n # phase:\n last_index = len(self._task_idxes)\n else:\n last_index = len(self._task_idxes) - 1\n\n distil_loss = 0.\n #with torch.no_grad():\n previous_logits = self._old_model(inputs)\n\n for i in range(last_index):\n task_idxes = 
self._task_idxes[i]\n\n distil_loss += F.binary_cross_entropy(\n F.softmax(logits[..., task_idxes] / self._temperature, dim=1),\n F.softmax(previous_logits[..., task_idxes] / self._temperature, dim=1)\n )\n\n return clf_loss, distil_loss", "def add_logits_op(self):\n with tf.variable_scope(\"bi-lstm\"):\n cell_fw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_lstm)\n cell_bw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_lstm)\n (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, self.word_embeddings,\n sequence_length=self.sequence_lengths, dtype=tf.float32)\n output = tf.concat([output_fw, output_bw], axis=-1)\n output = tf.nn.dropout(output, self.dropout)\n\n with tf.variable_scope(\"proj\"):\n W = tf.get_variable(\"W\", dtype=tf.float32,\n shape=[2*self.config.hidden_size_lstm, self.config.ntags])\n\n b = tf.get_variable(\"b\", shape=[self.config.ntags],\n dtype=tf.float32, initializer=tf.zeros_initializer())\n\n nsteps = tf.shape(output)[1]\n output = tf.reshape(output, [-1, 2*self.config.hidden_size_lstm])\n pred = tf.matmul(output, W) + b\n self.logits = tf.reshape(pred, [-1, nsteps, self.config.ntags])", "def compute_edge_logits(self):\n TODO('https://github.com/posterior/treecat/issues/26')", "def compute_edge_logits(self):\n TODO('https://github.com/posterior/treecat/issues/27')", "def get_logits(image):\n x = image\n for filters in (32, 64):\n x = tf.layers.conv2d(x, filters, 3)\n x = tf.nn.relu(x)\n x = tf.layers.max_pooling2d(x, 3, 2)\n x = tf.reduce_mean(x, axis=(1, 2))\n logits = tf.layers.dense(x, 10)\n return logits", "def logits(self, features: torch.Tensor) -> torch.Tensor:\n return self.temporal_module(features)", "def evaluate_(self, inputs):\n log_wks = []\n count = None\n N = self.config['eval_N']\n L = self.config['eval_repeats']\n\n for _ in xrange(N):\n log_wk, count = self.explore_(inputs, L)\n log_wks.append(log_wk)\n\n log_wk = np.concatenate(log_wks, axis=1)\n log_wk_sum = logSumExp(log_wk, axis=1, status='numpy')\n\n nll = np.mean(-(log_wk_sum - np.log(N * L)))\n perplexity = np.exp(np.mean(-(log_wk_sum - np.log(N * L)) / count))\n\n return nll, perplexity", "def compute_gradients(self, logits, target):\n\n target_length = target.shape[0]\n num_time_steps = logits.shape[0]\n\n\t\t######################\n\t\t### YOUR CODE HERE ###\n\t\t######################\n\n # expand labels by inserting a blank between each pair\n normalized_logits = softmax(logits)\n blank_label = normalized_logits.shape[1] - 1\n l = add_blanks(target, blank_label)\n target_length = l.shape[0]\n\n alpha = self.compute_forward_variables(normalized_logits, target) \n beta = self.compute_backward_variables(normalized_logits, target)\n\n # rescale\n alpha = alpha / np.sum(alpha, axis=0)\n beta = beta / np.sum(beta, axis=0)\n alphabeta = alpha * beta\n print \"alpha\"\n print alpha\n\n # compute zt\n z = Counter()\n for t in xrange(num_time_steps):\n for s, k in enumerate(l):\n z[t] += alphabeta[s, t] / normalized_logits[t, k]\n \n # normalized_logits is time steps t by labels k\n # alpha is 2 * target_length - 1 by time steps\n lab_zk = np.zeros_like(normalized_logits)\n for s, k in enumerate(l):\n for t in xrange(num_time_steps):\n lab_zk[t, k] += alphabeta[s, t]\n\n grad = normalized_logits\n for k in xrange(target.shape[0]):\n for t in xrange(num_time_steps):\n ytk = normalized_logits[t, k]\n constant = 1.0 / (ytk * z[t])\n grad[t, k] = ytk - constant * lab_zk[t, k]\n \n return grad", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n tn = 
self.confusion_matrix.tn\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n n = self.confusion_matrix.n\n\n fn_tp = fn + tp\n fp_tp = fp + tp\n\n h1 = -((fn_tp / n) * math.log2(fn_tp / n) +\n (1 - fn_tp / n) * math.log2(1 - fn_tp / n))\n\n h2 = -((fp_tp / n) * math.log2(fp_tp / n) +\n (1 - fp_tp / n) * math.log2(1 - fp_tp / n))\n\n p00 = 1 if tn == 0 else (tn / n)\n p01 = 1 if fn == 0 else (fn / n)\n p10 = 1 if fp == 0 else (fp / n)\n p11 = 1 if tp == 0 else (tp / n)\n\n h12 = -((tn / n) * math.log2(p00) +\n (fn / n) * math.log2(p01) +\n (fp / n) * math.log2(p10) +\n (tp / n) * math.log2(p11))\n\n mi = h1 + h2 - h12\n return mi", "def log_prob(self):", "def __convert_to_log(self):\n for i in range(self.nStates):\n if self.pi[i]>0:\n self.pi[i]=log(self.pi[i])\n else:\n self.pi[i]=float('-inf')\n for j in range(self.nStates):\n if self.t[i][j]>0:\n self.t[i][j]=log(self.t[i][j])\n else:\n self.t[i][j]=float('-inf')\n for j in range(self.nObs):\n if self.e[i][j]>0:\n self.e[i][j]=log(self.e[i][j])\n else:\n self.e[i][j]=float('-inf')\n self.logdomain=True", "def logit_cost(self, theta, X, y):\n\n cost = 0.0\n\n ### YOUR CODE HERE\n sig = utils.sigmoid(theta)\n \n for i in range(0, X.shape[0]):\n cost += (y[i]-1)*theta[i] + np.log(sig[i])\n ### END YOUR CODE\n cost = cost #+ 0.01 * self.regularizer[0](self.weights)\n return cost", "def on_train_begin(self, logs={}):\n self.losses = []\n self.accuracies = []", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n tn = self.confusion_matrix.tn\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n n = self.confusion_matrix.n\n\n fn_tp = fn + tp\n fp_tp = fp + tp\n\n h1 = -((fn_tp / n) * math.log2(fn_tp / n) +\n (1 - fn_tp / n) * math.log2(1 - fn_tp / n))\n\n h2 = -((fp_tp / n) * math.log2(fp_tp / n) +\n (1 - fp_tp / n) * math.log2(1 - fp_tp / n))\n\n p00 = 1 if tn == 0 else (tn / n)\n p01 = 1 if fn == 0 else (fn / n)\n p10 = 1 if fp == 0 else (fp / n)\n p11 = 1 if tp == 0 else (tp / n)\n\n h12 = -((tn / n) * math.log2(p00) +\n (fn / n) * math.log2(p01) +\n (fp / n) * math.log2(p10) +\n (tp / n) * math.log2(p11))\n\n mi = h1 + h2 - h12\n\n vi = h1 + h2 - 2 * mi\n return vi", "def logIP(self): # just use base?\n np.log(self.t, out=self.t)\n return self", "def compute_forward_variables(self, normalized_logits, target):\n\n target_length = target.shape[0]\n num_time_steps = normalized_logits.shape[0]\n\n\t\t######################\n\t\t### YOUR CODE HERE ###\n\t\t######################\n \n blank_label = normalized_logits.shape[1] - 1\n l = add_blanks(target, blank_label)\n target_length = l.shape[0]\n\n # init\n alpha = np.zeros((target_length, num_time_steps))\n alpha[0, 0] = normalized_logits[0, blank_label] # where s = 0, t = 0\n alpha[1, 0] = normalized_logits[0, target[0]] # where s = 1, t = 0\n for i in xrange(2, num_time_steps): # for all s >= 2, t = 0\n alpha[i, 0] = 0\n\n # recursive case\n for t in xrange(1, num_time_steps):\n for s in xrange(2, target_length):\n \n a_bar = alpha[s, t-1] + alpha[s-1, t-1] \n\n if l[s] == blank_label or l[s-2] == l[s]:\n alpha[s, t] = normalized_logits[t, l[s]] * a_bar\n else:\n alpha[s, t] = normalized_logits[t, l[s]] * (a_bar + alpha[s-2, t-1])\n return alpha", "def scores(self, eouts, temperature=1.0):\n return torch.log_softmax(self.output(eouts) / temperature, dim=-1)", "def _get_outputs(self, inputs, seq_lengths):\n\n\t\twith tf.name_scope('evaluate_logits'):\n\t\t\tlogits, _ = 
run_multi_model.run_multi_model(\n\t\t\t\tmodels=self.models,\n\t\t\t\tmodel_nodes=self.model_nodes,\n\t\t\t\tmodel_links=self.model_links,\n\t\t\t\tinputs=inputs,\n\t\t\t\tinputs_links=self.inputs_links,\n\t\t\t\tnodes_output_names=self.nodes_output_names,\n\t\t\t\toutput_names=self.output_names,\n\t\t\t\tseq_lengths=seq_lengths,\n\t\t\t\tis_training=False)\n\n\t\treturn logits", "def call(self, inputs):\n # print(f'type(inputs)={type(inputs)}.')\n # Transform a, e, and R from log to linear\n a = self.get_a()\n e = self.get_e()\n inc = self.get_inc()\n Omega = self.get_Omega()\n omega = self.get_omega()\n f = self.get_f()\n R = self.get_R()\n return a, e, inc, Omega, omega, f, self.epoch, R", "def log_forward_computations(self, x: list): \n n_x = len(x)\n \n # log_f_x initialized to -Inf because log(0) = -Inf\n log_f_x = np.zeros((self.n_states, n_x)) + logzero()\n x_emission_scores = np.array([self.scores['emission'][:, self.word_to_pos[w] if w in list(self.word_to_pos.keys()) else self.word_to_pos['UnknownWord']] for w in x]).T\n \n log_f_x[:,0] = x_emission_scores[:, 0] + self.scores['initial']\n \n for i in range(1,n_x):\n for s in range(self.n_states):\n log_f_x[s,i] = logsumexp(self.scores['transition'][s,:] + \n log_f_x[:,i-1]) + x_emission_scores[s, i]\n\n \n log_likelihood = logsumexp(self.scores['final'] + log_f_x[:,-1])\n \n return log_f_x, log_likelihood", "def evaluate(observations, model, states=None, log=False):\r\n N = model.N\r\n T = observations.shape[0]\r\n A = numpy.log(model.A)\r\n B = numpy.log(model.B)\r\n\r\n if states is None:\r\n alphas = forward_path(observations, numpy.log(model.pi), A, B, T, N)\r\n\r\n \"\"\" Termination \"\"\"\r\n result = add_logs(alphas[T-1, :])\r\n if log:\r\n return result\r\n else:\r\n return math.exp(result)\r\n\r\n else:\r\n result = 0\r\n for i in range(T):\r\n result += B[states[i], observations[i]]\r\n\r\n if log:\r\n return result\r\n else:\r\n return math.exp(result)", "def __call__(self, x_input, batch_size=None, is_training=False):\n reuse = True if self.built else None\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n with tf.variable_scope(self.ckpt):\n logits, end_points = resnet_v2.resnet_v2_50(\n x_input, num_classes=self.num_classes, is_training=is_training,\n reuse=reuse)\n\n preds = tf.argmax(logits, axis=1)\n self.built = True\n self.logits = logits\n self.preds = preds\n return logits", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \n \"*** YOUR CODE HERE ***\"\n \n # -- OUR CODE HERE\n \n \n import math\n for label in self.legalLabels:\n sumThing = 0.0\n for pixel in self.conditionalProb[label]:\n if datum[pixel] is 1:\n #assert self.conditionalProb[label][pixel] < 1.0 # -- sanity check that the probability is valid\n sumThing += math.log((self.conditionalProb[label][pixel]*1.0))\n else:\n sumThing+=math.log(1-self.conditionalProb[label][pixel]*1.0)\n logJoint[label] = math.log(self.prior[label]*1.0) + sumThing*1.0\n \n\n \n \n import time\n #print \"logJoint is :: \", logJoint\n #time.sleep(2)\n \n \n # -- uses the conditional probability tables computed in the current iteration\n # -- in train and tune\n \n return logJoint", "def compute_loglike(self, tools: ModelingTools) -> float:\n return -3.0 * self.placeholder", "def valuate(self, outputs=None):\n\n # Compute if not given\n if not outputs:\n frames = next(self.dataset_it)\n outputs = self._model_compute_all(frames)\n\n # Collect all metrics\n metrics = {\n \"metrics/\" + name: value\n for name, value in 
outputs[\"metrics\"].items()}\n if outputs[\"loss\"] is not None:\n metrics[\"loss\"] = outputs[\"loss\"]\n metrics[\"learning_rate\"] = self.learning_rate if not self.decay_rate \\\n else self.learning_rate(self._step)\n\n # Log scalars (convert to mean max if necessary)\n scalars = {}\n for name, value in metrics.items():\n is_scalar = (\n isinstance(value, int) or isinstance(value, float) or\n value.ndim == 0)\n if is_scalar:\n scalars[name] = value\n else:\n scalars[name + \"_mean\"] = tf.math.reduce_mean(value)\n scalars[name + \"_max\"] = tf.math.reduce_max(value)\n self.logger.save_scalars(self._step, scalars)\n\n # Log images\n images = self.model.images(outputs[\"outputs\"])\n self.logger.save_images(self._step, images)\n\n # Log histograms\n histograms = self.model.histograms(outputs[\"outputs\"])\n self.logger.save_histogram(self._step, histograms)\n\n # Transform tensors to arrays for nice logs\n scalars = {\n name: var.numpy() if isinstance(var, tf.Tensor) else var\n for name, var in scalars.items()\n }\n\n return scalars", "def log(self):\n return F.Log.apply(self)", "def total_score(self, logits):\n previous = torch.full((1, self.tag_size), -10000., device=device)\n previous[0][self.tag_map[self.start_tag]] = 0.\n\n for index in range(len(logits)):\n previous = previous.expand(self.tag_size, self.tag_size).t()\n emit = logits[index].view(1, -1).expand(self.tag_size, self.tag_size)\n scores = previous + emit + self.transitions\n previous = log_sum_exp(scores)\n\n # previous = previous + self.transitions[:, self.tag_map[self.stop_tag]]\n # previous += self.transitions[self.tag_map[self.stop_tag]]\n previous += self.transitions[self.tag_map[:, self.stop_tag]]\n total_scores = log_sum_exp(previous.t())[0]\n return total_scores", "def logrels(rets):\n return np.log(rets + 1)", "def compute_log_prob(self,params: ndarray) -> float:\n return self.compute_log_prior(params) + self.compute_log_likelihood(params)", "def __call__(self, x_input, batch_size=None, is_training=False):\n reuse = True if self.built else None\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n with tf.variable_scope(self.ckpt):\n logits, end_points = resnet_v2.resnet_v2_152(\n x_input, num_classes=self.num_classes, is_training=is_training,\n reuse=reuse)\n\n preds = tf.argmax(logits, axis=1)\n self.built = True\n self.logits = logits\n self.preds = preds\n return logits", "def _loglike(self, y, f):\n # sum along last axis, which is assumed to be the `tasks` axis\n ll = tf.reduce_sum(y * tf.log(pos(f)), axis=-1)\n return ll", "def __init__(self):\n self.counts = [0] * 10\n self.values = [0] * 10\n self.softmaxvalues = [0] * 10\n self.t = 0.3", "def __call__(self, x_input, batch_size=None, is_training=False):\n reuse = True if self.built else None\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n with tf.variable_scope(self.ckpt):\n logits, end_points = resnet_v2.resnet_v2_101(\n x_input, num_classes=self.num_classes, is_training=is_training,\n reuse=reuse)\n\n preds = tf.argmax(logits, axis=1)\n self.built = True\n self.logits = logits\n self.preds = preds\n return logits", "def entropy(self, logits):\n probs = torch.exp(logits)\n entropy = - torch.sum(probs * logits, dim=-1)\n return entropy", "def logloss(self,tple):\n feats = self.dataset.input_features\n res = 0\n cc = self.class_counts\n fc = self.feature_counts\n for c in range(self.num_classes):\n res += prod(fc[i][feat(tple)][c]\n for (i,feat) in enumerate(feats))/(cc[c]**(len(feats)-1))\n if res>0:\n return 
-math.log2(res/len(self.dataset.train))\n else:\n return float(\"inf\") #infinity", "def call(self, inputs, full_out=True):\n xn = self.xn * self.sigma * sqrt2 + self.mu\n u1 = self.mu[:, :, :-1, ...]\n o1 = self.sigma[:, :, :-1, ...]\n u2 = tf.gather(self.mu, self.id, axis=2)\n o2 = tf.gather(self.sigma, self.id, axis=2)\n x1 = self.xi * o1 * sqrt2 + u1\n x2 = self.xj * o2 * sqrt2 + u2\n xe = tf.stack([x1, x2], -1)\n # ------------------------------log pdf -> log of sum of each mixture component-------------------------------\n alpha = tf.nn.softmax(self.w, axis=3)\n alf = tf.expand_dims(alpha, 3)\n o = tf.expand_dims(self.sigma, 3)\n # lpn = tf.math.divide_no_nan(tf.exp(-((tf.expand_dims(xn, 4) -\n # tf.expand_dims(self.mu, 3)) / o) ** 2 / 2), o * sqrt2pi)\n lpn = tf.exp(-((tf.expand_dims(xn, 4) - tf.expand_dims(self.mu, 3)) / o) ** 2 / 2) / (o * sqrt2pi)\n lpn = tf.math.log(tf.reduce_sum(lpn * alf, 4) + 1e-307)\n lpn = tf.concat( # intercept NaN -> 0\n [tf.concat([lpn[:, :1, :729, ...], tf.zeros_like(lpn)[:, 1:, :729, ...]], 1), lpn[:, :, 729:, ...]], 2)\n o1_ = tf.expand_dims(o1, 3)\n o2_ = tf.expand_dims(o2, 3)\n b1 = tf.exp(-((tf.expand_dims(x1, 4) - tf.expand_dims(u1, 3)) / o1_) ** 2 / 2) / (o1_ * sqrt2pi)\n b1 = tf.concat( # intercept NaN -> 1\n [tf.concat([b1[:, :1, :729, ...], tf.ones_like(b1)[:, 1:, :729, ...]], 1), b1[:, :, 729:, ...]], 2)\n b2 = tf.exp(-((tf.expand_dims(x2, 4) - tf.expand_dims(u2, 3)) / o2_) ** 2 / 2) / (o2_ * sqrt2pi)\n # lpe = tf.math.divide_no_nan(tf.exp(-((tf.expand_dims(x1, 4) - tf.expand_dims(u1, 3)) / o1_) ** 2 / 2\n # - ((tf.expand_dims(x2, 4) - tf.expand_dims(u2, 3)) / o2_) ** 2 / 2),\n # o1_ * o2_ * _2pi)\n lpe = tf.math.log(tf.reduce_sum(b1 * b2 * alf, 4) + 1e-307)\n return (xn, xe, lpn, lpe, alpha, o1, o2) if full_out else (xn, xe, lpn, lpe, alpha)", "def evaluate_m(logits, labels):\n sum_m_a_p = 0\n sum_m_r_r = 0\n sum_p_1 = 0\n sum_r_1 = 0\n sum_r_2 = 0\n sum_r_5 = 0\n\n data = []\n total_num = 0\n for i in range(len(logits)):\n if i % 10 == 0:\n data = []\n data.append((float(logits[i]), int(labels[i])))\n if i % 10 == 9:\n total_num += 1\n m_a_p, m_r_r, p_1, r_1, r_2, r_5 = evaluation_one_session(data)\n sum_m_a_p += m_a_p\n sum_m_r_r += m_r_r\n sum_p_1 += p_1\n sum_r_1 += r_1\n sum_r_2 += r_2\n sum_r_5 += r_5\n\n return (1.0 * sum_m_a_p / total_num, 1.0 * sum_m_r_r / total_num, 1.0 * sum_p_1 / total_num,\n 1.0 * sum_r_1 / total_num, 1.0 * sum_r_2 / total_num, 1.0 * sum_r_5 / total_num)", "def compute_all(self) -> None:\n self.compute_j_matrix()\n self.compute_outter_distribution()\n self.compute_max_prior()\n self.compute_max_poutter()", "def on_train_begin(self, logs={}):\n self._beta = []", "def compute_edge_logits(self):\n V, E, K, M = self._VEKM\n vert_logits = logprob_dc(self._vert_ss, self._vert_prior, axis=1)\n if len(self._added_rows) == V:\n assignments = self._assignments\n else:\n assignments = self._assignments[sorted(self._added_rows), :]\n assignments = np.array(assignments, order='F')\n parallel = self._config['learning_parallel']\n result = treecat_compute_edge_logits(M, self._tree.complete_grid,\n self._gammaln_table, assignments,\n vert_logits, parallel)\n result += self._tree_prior\n return result", "def calculateLogJointProbabilities(self, datum):\n\tlogJoint = util.Counter()\n\t#want to calculate log(P(y)) + log(sum(P(fi|y)))\n\t#where y is a label\n\tfor label in self.legalLabels:\n\t\tlogJoint[label] = math.log(self.prior_distribution_prob[label])\n\t\tfor feature, value in datum.items():\n\t\t\tcp = 
self.conditional_prob[label][feature][value]\n\t\t\tif cp > 0: #condition check for values < 0 because log(0) is undefined and math domain error occurs\n\t\t\t\tlogJoint[label] += math.log(cp) #summing up\n\t\t\t\t\n\treturn logJoint", "def log_pseudo_joint(self, data: Tensor, states: Tensor) -> Tensor: # type: ignore\n K = states\n Y = data\n assert K.dtype == to.uint8 and Y.dtype == to.uint8\n pi = self.theta[\"pies\"]\n W = self.theta[\"W\"]\n batch_size, S, H = K.shape\n D = W.shape[0]\n dev = pi.device\n\n logPriors = to.matmul(K.type_as(pi), to.log(pi / (1 - pi)))\n\n logPy = to.empty((batch_size, S), device=dev, dtype=self.precision)\n # We will manually set the lpjs of all-zero states to the appropriate value.\n # For now, transform all-zero states in all-one states, to avoid computation of log(0).\n zeroStatesInd = to.nonzero((K == 0).all(dim=2))\n # https://discuss.pytorch.org/t/use-torch-nonzero-as-index/33218\n zeroStatesInd = (zeroStatesInd[:, 0], zeroStatesInd[:, 1])\n K[zeroStatesInd] = 1\n # prods_nsd = prod{h}{1-W_dh*K_nkh}\n prods = (W * K.type_as(W).unsqueeze(2)).neg_().add_(1).prod(dim=-1)\n to.clamp(prods, self.eps, 1 - self.eps, out=prods)\n # logPy_nk = sum{d}{y_nd*log(1/prods_nkd - 1) + log(prods_nkd)}\n f1 = to.log(1.0 / prods - 1.0)\n indeces = 1 - Y[:, None, :].expand(batch_size, S, D)\n # convert to BoolTensor in pytorch>=1.2, leave it as ByteTensor in earlier versions\n indeces = indeces.type_as(to.empty(0) < 0)\n f1[indeces] = 0.0\n logPy[:, :] = to.sum(f1, dim=-1) + to.sum(to.log(prods), dim=2)\n K[zeroStatesInd] = 0\n\n lpj = logPriors + logPy\n # for all-zero states, set lpj to arbitrary very low value if y!=0, 0 otherwise\n # in the end we want exp(lpj(y,s=0)) = 1 if y=0, 0 otherwise\n lpj[zeroStatesInd] = -1e30 * data[zeroStatesInd[0]].any(dim=1).type_as(lpj)\n assert (\n not to.isnan(lpj).any() and not to.isinf(lpj).any()\n ), \"some NoisyOR lpj values are invalid!\"\n return lpj.to(device=states.device) # (N, S)", "def call(self, x, mask=None):\n if K.backend() == 'theano':\n from theano import scan\n components, update = scan(fn=lambda tx: self.logm(tx),\n outputs_info=None,\n sequences=[x],\n non_sequences=None)\n\n return components\n else:\n if self.built:\n # return self.logm(x)\n from kyu.tensorflow.ops.svd_gradients import gradient_eig_for_log\n import tensorflow as tf\n # g = tf.get_default_graph()\n\n # s, u, v = tf.svd(x)\n s, u = tf.self_adjoint_eig(x)\n s = tf.abs(s)\n inner = s + self.eps\n # inner = tf.Print(inner, [inner], message='log_inner before:')\n\n inner = tf.log(inner)\n # inner = tf.Print(inner, [inner], message='log_inner :')\n inner = tf.where(tf.is_nan(inner), tf.zeros_like(inner), inner)\n inner = tf.matrix_diag(inner)\n tf_log = tf.matmul(u, tf.matmul(inner, tf.transpose(u, [0, 2, 1])))\n return tf_log\n\n else:\n raise RuntimeError(\"Log transform layer should be built before using\")", "def on_train_end(self, logs={}):\n LOSSES.append(self.losses)\n print(self.j)\n (x_test, y_test) = get_test_data(self.j)\n y_pred = model.predict(x_test)\n y_pred = y_pred.squeeze()\n y_pred[y_pred < 0.5] = 0\n y_pred[y_pred >= 0.5] = 1\n print(y_pred)\n\n y_test = y_test.squeeze()\n print(y_test)\n confmat = confusion_matrix(y_test,y_pred)\n print(confmat)\n calc_TSS(confmat,2)", "def train(self):\n # Restore models\n global_step = self._restore_models_and_step()\n \n if self.gold and global_step >= self.gold_step:\n self.netD.use_gold = True\n\n print(\"INFO: Starting training from global step {}...\".format(\n global_step))\n 
logit_save_num = 0\n\n self.logit_results = defaultdict(dict)\n\n try:\n start_time = time.time()\n\n # Mixed precision\n if self.amp:\n print(\"INFO: Using mixed precision training...\")\n scaler = torch.cuda.amp.GradScaler()\n else:\n scaler = None\n\n # Iterate through data\n iter_dataloader = iter(self.dataloader)\n if self.train_drs:\n iter_dataloader_drs = iter(self.dataloader_drs)\n while global_step < self.num_steps:\n log_data = metric_log.MetricLog() # log data for tensorboard\n\n if self.topk:\n self.netG.decay_topk_rate(global_step, epoch_steps=len(self.dataloader))\n\n if self.gold and global_step == self.gold_step:\n self.netD.use_gold = True\n # -------------------------\n # One Training Step\n # -------------------------\n # Update n_dis times for D\n for i in range(self.n_dis):\n iter_dataloader, real_batch = self._fetch_data(\n iter_dataloader=iter_dataloader)\n\n # ------------------------\n # Update D Network\n # -----------------------\n log_data = self.netD.train_step(\n real_batch=real_batch,\n netG=self.netG,\n optD=self.optD,\n log_data=log_data,\n global_step=global_step,\n device=self.device,\n scaler=scaler)\n\n # train netD2 for DRS\n if self.train_drs:\n iter_dataloader_drs, real_batch_drs = self._fetch_data(\n iter_dataloader=iter_dataloader_drs)\n log_data = self.netD_drs.train_step(\n real_batch=real_batch_drs,\n netG=self.netG,\n optD=self.optD_drs,\n log_data=log_data,\n global_step=global_step,\n device=self.device,\n scaler=scaler)\n\n # -----------------------\n # Update G Network\n # -----------------------\n # Update G, but only once.\n if i == (self.n_dis - 1):\n log_data = self.netG.train_step(\n real_batch=real_batch,\n netD=self.netD,\n optG=self.optG,\n global_step=global_step,\n log_data=log_data,\n device=self.device,\n scaler=scaler)\n\n # --------------------------------\n # Update Training Variables\n # -------------------------------\n global_step += 1\n\n log_data = self.scheduler.step(log_data=log_data,\n global_step=global_step)\n\n # -------------------------\n # Logging and Metrics\n # -------------------------\n if global_step % self.log_steps == 0:\n self.logger.write_summaries(log_data=log_data,\n global_step=global_step)\n\n if global_step % self.print_steps == 0:\n curr_time = time.time()\n topk_rate = self.netG.topk_rate if hasattr(self.netG, 'topk_rate') else 1\n log_data.add_metric(f'topk_rate', topk_rate, group='topk_rate', precision=6)\n self.logger.print_log(global_step=global_step,\n log_data=log_data,\n time_taken=(curr_time - start_time) /\n self.print_steps)\n start_time = curr_time\n\n if global_step % self.vis_steps == 0:\n if 'gaussian' in self.log_dir:\n plot_gaussian_samples(netG=self.netG,\n global_step=global_step,\n log_dir=self.log_dir,\n device=self.device)\n else:\n self.logger.vis_images(netG=self.netG,\n global_step=global_step)\n \n if self.save_logits and global_step % self.logit_save_steps == 0 and global_step >= self.save_logit_after and global_step <= self.stop_save_logit_after:\n if self.train_drs:\n netD = self.netD_drs\n netD_name = 'netD_drs'\n else:\n netD = self.netD\n netD_name = 'netD'\n mode = 'eval' if self.save_eval_logits else 'train'\n print(f\"INFO: logit saving {mode} netD: {netD_name}...\")\n logit_list = self._get_logit(netD=netD, eval_mode=mode=='eval')\n self.logit_results[f'{netD_name}_{mode}'][global_step] = logit_list\n\n logit_save_num += 1\n\n if global_step % self.save_steps == 0:\n print(\"INFO: Saving checkpoints...\")\n self._save_model_checkpoints(global_step)\n if 
self.save_logits and global_step >= self.save_logit_after:\n self._save_logit(self.logit_results)\n\n print(\"INFO: Saving final checkpoints...\")\n self._save_model_checkpoints(global_step)\n if self.save_logits and global_step >= self.save_logit_after:\n self._save_logit(self.logit_results)\n\n except KeyboardInterrupt:\n print(\"INFO: Saving checkpoints from keyboard interrupt...\")\n self._save_model_checkpoints(global_step)\n if self.save_logits and global_step >= self.save_logit_after:\n self._save_logit(self.logit_results)\n\n finally:\n self.logger.close_writers()\n\n print(\"INFO: Training Ended.\")", "def on_train_end(self, logs=None):", "def on_train_end(self, logs=None):", "def tent(x: torch.Tensor) -> torch.Tensor:\n return -(x.softmax(1) * x.log_softmax(1)).sum(1).mean(0)", "def nits(self):", "def __call__(self, h):\n\n Wh = self.W(h)\n p_yt = F.log_softmax(Wh) # should be (B x V)\n\n return p_yt", "def output(self, x_tensor, num_outputs):\n shape = x_tensor.get_shape().as_list()\n weights = tf.Variable(tf.truncated_normal([shape[-1], num_outputs], mean=0, stddev=0.01))\n biases = tf.Variable(tf.zeros([num_outputs]))\n logits = tf.add(tf.matmul(x_tensor, weights), biases)\n return logits", "def compute_loglike(self, tools: ModelingTools) -> float:\n return -1.5", "def inference(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n # Shapes of layers\n W_shapes = [self.input_dim] + self.n_hidden + [self.n_classes]\n W_shapes = [(W_shapes[i], W_shapes[i + 1]) for i in range(len(W_shapes) - 1)]\n\n Z = x\n for layer_num, shape in enumerate(W_shapes):\n layer_name = 'dense_{}'.format(layer_num)\n Z = self._dense_layer(inputs=Z, W_shape=shape, scope_name=layer_name)\n\n logits = Z\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n return logits", "def compute_metrics(self):\n pass", "def logistic(weights, data, targets, hyperparameters):\n\n # TODO: Finish this function\n\n return f, df, y", "def logits_on_features(self, h, batch):\n batch = batch.to(h.device)\n # Extract features with the model\n features = h.view(batch.size, -1)\n # Log loss\n logits = self.head(features)\n return logits", "def log_likelihood_exp(self, x):\n predictions = self.get_predictions(x)\n ll = 0.\n for measurement in self.get_measurements:\n m_obj = flavio.Measurement[measurement]\n m_obs = m_obj.all_parameters\n exclude_observables = set(m_obs) - set(self.observables)\n prob_dict = m_obj.get_logprobability_all(predictions, exclude_parameters=exclude_observables)\n ll += sum(prob_dict.values())\n return ll", "def get_cross_entropy(self):\n assert (self.dataset is not None) and (self.labels is not None), 'Logistic Regression requires a dataset and labels.'\n potential = 0.0\n logits = self.dataset @ self.parameters[:self.dataset.shape[1]]\n max_logits = torch.max(torch.zeros(logits.shape[0]),logits)\n potential = (-logits @ self.labels.t() + torch.sum(max_logits) + torch.sum(\n torch.log(torch.exp(-max_logits)+torch.exp(logits - max_logits))))# * n.reciprocal())\n return potential", "def loglike(self, params, *args, **kwargs):\n return np.sum(self.loglikeobs(params, *args, **kwargs))", "def nll_logprobs(self, input, target_idx):\n raise NotImplementedError()", "def compute_cost(self,X, y):\r\n num_examples = np.shape(X)[0]\r\n z = np.dot(X,self.theta) + self.bias\r\n exp_z = np.exp(z)\r\n softmax_scores = exp_z / np.sum(exp_z, axis=1, keepdims=True)\r\n \r\n one_hot_y = np.zeros((num_examples,np.max(y)+1))\r\n logloss = 
np.zeros((num_examples,)) \r\n for i in range(np.shape(X)[0]):\r\n one_hot_y[i,y[i]] = 1\r\n logloss[i] = -np.sum(np.log(softmax_scores[i,:]) * one_hot_y[i,:])\r\n data_loss = np.sum(logloss)\r\n return 1./num_examples * data_loss", "def evaluate_and_log (self, config, budget):\n\n start = time.time()\n res = self.compute(config, budget=budget)\n end = time.time()\n\n\n id = (len(self.run_data), 0,0)\n\n # construct a Datum object to mimic the internals of a HpBandSter iteration\n res_dict = {budget: {'loss': res['loss'], 'info': res['info']}}\n ts_dict = {budget: {'submitted': start, 'started': start, 'finished': end}}\n self.run_data[id] = Datum(config, {}, results=res_dict, budget=budget, time_stamps = ts_dict, status='FINISHED')\n\n return(res[\"loss\"])", "def getlogits(self, words, labels, weight, bias, emb):\n \n w = tf.nn.embedding_lookup(weight, words)\n b = tf.nn.embedding_lookup(bias, labels)\n word_emb = tf.nn.embedding_lookup(emb, labels)\n result = tf.reduce_sum(tf.mul(word_emb, w), 1) + b\n #result = tf.maximum(tf.minimum(result, 10), -10)\n return result", "def logit_fn(args: StepFunctionArgs) -> SingleScorePerStepTensor:\n logits = args.attribution_model.output2logits(args.forward_output)\n target_ids = args.target_ids.reshape(logits.shape[0], 1)\n return logits.gather(-1, target_ids).squeeze(-1)", "def evaluate(sess, images_ph, labels_ph, softmax, mnist, config, task):\n\n print 'Evaluating on {} task ({}x{}, {} distractors) using {} glimpses (at {} scales)'.format(\n task, config.new_size, config.new_size, config.n_distractors,\n config.num_glimpses, config.n_patches)\n\n # Evaluation\n test_acc = []\n val_acc = []\n\n for k, dataset in enumerate([mnist.validation, mnist.test]):\n\n steps_per_epoch = dataset.num_examples // config.eval_batch_size\n correct_cnt = 0\n num_samples = steps_per_epoch * config.batch_size\n # loc_net.sampling = True\n\n for test_step in tqdm(xrange(steps_per_epoch)):\n\n images, labels = dataset.next_batch(config.batch_size)\n images = images.reshape((-1, config.original_size, config.original_size, 1))\n labels_bak = labels\n\n if task == 'translated':\n images = translate(images, width=config.new_size, height=config.new_size)\n elif task == 'cluttered':\n images = clutter(images,\n dataset.images.reshape((-1, config.original_size, config.original_size, 1)),\n width=config.new_size, height=config.new_size, n_patches=config.n_distractors\n )\n elif task == 'cluttered_var':\n images, _, _, _ = clutter_rnd(images,\n train_data=dataset.images.reshape(\n (-1, config.original_size, config.original_size, 1)),\n lim=config.distractor_range,\n color_digits=config.color_digits,\n color_noise=config.color_noise,\n width=config.new_size, height=config.new_size, norm=True)\n\n # else:\n # print 'original mnist data ({}x{}).'.format(config.original_size,config.original_size)\n\n # Duplicate M times (average prediction over M repeats)\n images = np.tile(images, [config.M, 1, 1, 1])\n labels = np.tile(labels, [config.M])\n\n softmax_val = sess.run(softmax,\n feed_dict={\n images_ph: images,\n labels_ph: labels\n })\n softmax_val = np.reshape(softmax_val,\n [config.M, -1, config.num_classes])\n softmax_val = np.mean(softmax_val, 0)\n\n pred_labels_val = np.argmax(softmax_val, 1)\n correct_cnt += np.sum(pred_labels_val == labels_bak)\n acc = correct_cnt / float(num_samples)\n\n if k == 0:\n print '\\nVal accuracy\\t{:4.4f} ({:4.4f} error)'.format(100 * acc, 100 - 100 * acc)\n val_acc = acc\n else:\n print 'Test accuracy\\t{:4.4f} ({:4.4f} error)\\n'.format(100 * 
acc, 100 - 100 * acc)\n test_acc = acc\n\n return test_acc, val_acc", "def log(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return self.__class__(self._diag.log())", "def logistic(weights, data, targets, hyperparameters):\n # TODO: Finish this function\n n_data = len(data)\n dim_data = len(data[0])\n\n f = 0\n y = logistic_predict(weights, data)\n\n data = mod_data(data)\n\n # dl/dw_j = SUM(x_ij * (t_i - (1 - sigmoid(z))))\n df = np.dot(data.T, (1.0 * targets) - (1 - y))\n\n # to calculate f, we need to sum the negative log of all y iff target is 0 and (1-y) iff target is 1\n f = -1.0 * np.dot(targets.T, np.log(1 - y)) - 1.0 * np.dot(1 - targets.T, np.log(y))\n\n # calculate P(C=0|x_i) for all x_i \n return f[0,0], df, y", "def on_train_begin(self, logs={}):\n self.losses = []\n self.val_losses = []", "def log_prob(self, x):\n n_samples = x.shape[0].value\n log_prob = 0\n for i in range(self.time_steps):\n obs_dim = self.transition.dim_x\n # Current time step latent state and observation\n x_t = tf.slice(x, [0, i * obs_dim], [-1, obs_dim])\n if i == 0:\n log_prob += tf.reduce_sum(\n self.init_state_p.log_prob(x_t), axis=1)\n else:\n log_prob += self.transition.log_prob(x_t, x_tminus)\n # Update the previous latent state for next iteration\n x_tminus = x_t\n return log_prob", "def initial_state(self):\n r = np.full((self.xlen, 2), self.log0, dtype=np.float32)\n r[0, 1] = self.log_probs[0, self.blank]\n for i in range(1, self.xlen):\n r[i, 1] = r[i - 1, 1] + self.log_probs[i, self.blank]\n return r", "def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + '{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', '{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients", "def entropy(self):\n return -np.sum(self.log_likelihoods * np.exp(self.log_likelihoods))", "def _get_logits(images,\n model_options,\n outputs_to_num_classes,\n weight_decay=0.0001,\n reuse=tf.AUTO_REUSE,\n is_training=False,\n fine_tune_batch_norm=False):\n features, end_points = _extract_features(\n images,\n model_options,\n weight_decay=weight_decay,\n reuse=reuse,\n is_training=is_training,\n 
fine_tune_batch_norm=fine_tune_batch_norm)\n\n # TODO: CHECK\n DEBUG_VARS.aspp_result = features\n if model_options.decoder_output_stride is not None:\n decoder_height = scale_dimension(model_options.crop_size[0],\n 1.0 / model_options.decoder_output_stride)\n decoder_width = scale_dimension(model_options.crop_size[1],\n 1.0 / model_options.decoder_output_stride)\n features = refine_by_decoder(\n features,\n end_points,\n decoder_height=decoder_height,\n decoder_width=decoder_width,\n decoder_use_separable_conv=model_options.decoder_use_separable_conv,\n model_variant=model_options.model_variant,\n weight_decay=weight_decay,\n reuse=reuse,\n is_training=is_training,\n fine_tune_batch_norm=fine_tune_batch_norm)\n\n outputs_to_logits = {}\n for output in sorted(outputs_to_num_classes):\n outputs_to_logits[output] = _get_branch_logits(\n features,\n outputs_to_num_classes[output],\n model_options.atrous_rates,\n aspp_with_batch_norm=model_options.aspp_with_batch_norm,\n kernel_size=model_options.logits_kernel_size,\n weight_decay=weight_decay,\n reuse=reuse,\n scope_suffix=output)\n\n return outputs_to_logits", "def _compute_log_likelihood(self, parameters):\n raise NotImplementedError('')", "def log10(self):\n return Factor().__build( VarSet(self.v) , np.log10(self.t) )", "def __call__(self, logits, elens, ys, ylens):\n with torch.no_grad():\n ys = [np2tensor(np.fromiter(y, dtype=np.int64), logits.device) for y in ys]\n ys_in_pad = pad_list(ys, 0)\n mask = make_pad_mask(elens)\n mask = mask.unsqueeze(2).expand_as(logits)\n logits = logits.masked_fill_(mask == 0, LOG_0)\n log_probs = torch.log_softmax(logits, dim=-1).transpose(0, 1)\n trigger_points = self.align(log_probs, elens, ys_in_pad, ylens)\n return trigger_points", "def on_train_begin(self, logs=None):", "def on_train_begin(self, logs=None):", "def get_cost(self, Y, T):\n return - np.multiply(T, np.log(Y)).sum() / Y.shape[0]", "def _calculate_log_det(self, var):\n log_det = []\n\n for k in range(self.n_components):\n evals, evecs = tf.linalg.eig(var[0, k])\n\n log_det.append(tf.reduce_sum(tf.math.log(tf.math.real(evals))))\n log_det = tf.convert_to_tensor(log_det)\n return tf.expand_dims(log_det, -1)", "def compute_per_list(self, labels, logits, weights, mask=None):\n raise NotImplementedError('Calling an abstract method.')", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n \n # Define parameters to store w and loss\n ws = [initial_w]\n losses = []\n w = initial_w\n y = (y + 1) / 2 # [-1, 1] -> [0, 1]\n for n_iter in range(max_iters):\n # computes gradient and loss\n\n grad = compute_gradient_log(y, tx, w)\n loss = compute_loss_log(y, tx, w)\n\n #updates w\n\n w = w - gamma * grad\n # store w and loss\n\n ws.append(w)\n losses.append(loss)\n #print(\"logistic regression: Gradient Descent({bi}/{ti}): loss={l}\".format(\n # bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]), end=\"\\r\")\n \n return w, loss", "def __compute_log_likelihood(self, outputs, data, boolean):\n end_loc, pi_loc, mu_loc, sigma_loc, rho_loc = outputs\n\n mu1_loc, mu2_loc = mu_loc[:, :, :20], mu_loc[:, :, 20:]\n sig1_loc, sig2_loc = (\n sigma_loc[:, :, :20] + 10e-10,\n sigma_loc[:, :, 20:] + 10e-10,\n )\n\n x1_loc = data[:, 1:, 1].unsqueeze(2).to(self.__device)\n x2_loc = data[:, 1:, 2].unsqueeze(2).to(self.__device)\n x3_loc = data[:, 1:, 0].to(self.__device)\n\n end_loc = end_loc[:, :-1, -1].to(self.__device)\n pi_loc = pi_loc[:, :-1, :].to(self.__device)\n mu1_loc = mu1_loc[:, :-1, :].to(self.__device)\n mu2_loc = mu2_loc[:, :-1, 
:].to(self.__device)\n sig1_loc = sig1_loc[:, :-1, :].to(self.__device)\n sig2_loc = sig2_loc[:, :-1, :].to(self.__device)\n rho_loc = rho_loc[:, :-1, :].to(self.__device)\n\n boolean = boolean[:, :-1].to(self.__device)\n\n gauss = pi_loc * self.__bivariate_gaussian(\n sig1_loc, sig2_loc, mu1_loc, mu2_loc, x1_loc, x2_loc, rho_loc\n )\n gauss = torch.sum(gauss, dim=2).to(self.__device)\n\n log_lik = torch.sum(\n -boolean * torch.log(gauss + 10e-10)\n - boolean * torch.log(end_loc + 10e-10) * (x3_loc)\n - boolean * torch.log(1 - end_loc + 10e-10) * (1 - x3_loc)\n )\n\n return log_lik", "def __compute_log_likelihood(self, outputs, data, boolean):\n end_loc, pi_loc, mu_loc, sigma_loc, rho_loc = outputs\n\n mu1_loc, mu2_loc = mu_loc[:, :, :20], mu_loc[:, :, 20:]\n sig1_loc, sig2_loc = (\n sigma_loc[:, :, :20] + 10e-10,\n sigma_loc[:, :, 20:] + 10e-10,\n )\n\n x1_loc = data[:, 1:, 1].unsqueeze(2).to(self.__device)\n x2_loc = data[:, 1:, 2].unsqueeze(2).to(self.__device)\n x3_loc = data[:, 1:, 0].to(self.__device)\n\n end_loc = end_loc[:, :-1, -1].to(self.__device)\n pi_loc = pi_loc[:, :-1, :].to(self.__device)\n mu1_loc = mu1_loc[:, :-1, :].to(self.__device)\n mu2_loc = mu2_loc[:, :-1, :].to(self.__device)\n sig1_loc = sig1_loc[:, :-1, :].to(self.__device)\n sig2_loc = sig2_loc[:, :-1, :].to(self.__device)\n rho_loc = rho_loc[:, :-1, :].to(self.__device)\n\n boolean = boolean[:, :-1].to(self.__device)\n\n gauss = pi_loc * self.__bivariate_gaussian(\n sig1_loc, sig2_loc, mu1_loc, mu2_loc, x1_loc, x2_loc, rho_loc\n )\n gauss = torch.sum(gauss, dim=2).to(self.__device)\n\n log_lik = torch.sum(\n -boolean * torch.log(gauss + 10e-10)\n - boolean * torch.log(end_loc + 10e-10) * (x3_loc)\n - boolean * torch.log(1 - end_loc + 10e-10) * (1 - x3_loc)\n )\n\n return log_lik", "def logTF(self, tf):\n return math.log(tf)", "def reduce_metrics(logging_outputs) -> None:\n loss_sum = sum(log.get('loss', 0) for log in logging_outputs)\n neg_elbo_sum = sum(log.get('neg_elbo', 0) for log in logging_outputs)\n recon_loss_sum = sum(log.get('recon_loss', 0) for log in logging_outputs)\n ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)\n sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)\n nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)\n KLz_sum = sum(log.get('KLz', 0) for log in logging_outputs)\n KLt_sum = sum(log.get('KLt', 0) for log in logging_outputs)\n KLtheta_sum = sum(log.get('KLtheta', 0) for log in logging_outputs)\n\n if 'nll_iw' in logging_outputs[0]:\n nll_iw_sum = sum(log.get('nll_iw', 0) for log in logging_outputs)\n metrics.log_scalar('nll_iw_s', nll_iw_sum / nsentences, \n nsentences, round=3, priority=4)\n metrics.log_scalar('nll_iw_t', nll_iw_sum / ntokens / math.log(2), \n ntokens, round=3, priority=5) \n metrics.log_derived('ppl_iw', lambda meters: utils.get_perplexity(meters['nll_iw_t'].avg), priority=6)\n\n else:\n metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), \n sample_size, round=3, priority=3)\n\n metrics.log_scalar('neg_elbo_s', neg_elbo_sum / nsentences, \n nsentences, round=3, priority=4)\n metrics.log_scalar('recon_loss_s', recon_loss_sum / nsentences, \n nsentences, round=3, priority=4)\n\n metrics.log_scalar('neg_elbo_t', neg_elbo_sum / ntokens / math.log(2), \n ntokens, round=3, priority=5)\n metrics.log_scalar('recon_loss_t', recon_loss_sum / ntokens / math.log(2), \n ntokens, round=3, priority=5)\n\n metrics.log_scalar('KLz', KLz_sum / nsentences, nsentences, round=1, priority=8)\n 
metrics.log_scalar('KLt', KLt_sum / nsentences, nsentences, round=1, priority=8)\n metrics.log_scalar('KLtheta', KLtheta_sum / nsentences, nsentences, round=1, priority=8)\n\n metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['neg_elbo_t'].avg), priority=6)\n metrics.log_derived('recon_ppl', lambda meters: utils.get_perplexity(meters['recon_loss_t'].avg), priority=7)\n\n if 'active' in logging_outputs[0]:\n metrics.log_scalar('active', logging_outputs[0]['active'], weight=0, round=1, priority=10)\n metrics.log_scalar('percent', logging_outputs[0]['percent'], weight=0, round=2, priority=10)\n # metrics.log_scalar('nlow', logging_outputs[0]['nlow'], weight=0, priority=10)\n # metrics.log_scalar('nhigh', logging_outputs[0]['nhigh'], weight=0, priority=10)", "def log_prior_grad(self, inputs):", "def grad_softmax_crossentropy_with_logits(logits,reference_answers):\r\n ones_for_answers = np.zeros_like(logits)\r\n ones_for_answers[np.arange(len(logits)),reference_answers] = 1\r\n exp = np.exp(logits)\r\n if exp.ndim == 1:\r\n exp = vector_conversion(exp, 5)\r\n else:\r\n exp = array_conversion(exp, 5)\r\n sumExp = exp.sum(axis=-1,keepdims=True)\r\n if sumExp.ndim == 1:\r\n sumExp = vector_conversion(sumExp, 5)\r\n else:\r\n sumExp = array_conversion(sumExp, 5)\r\n #softmax = np.exp(logits) / np.exp(logits).sum(axis=-1,keepdims=True)\r\n softmax = exp/sumExp\r\n return (- ones_for_answers + softmax) / logits.shape[0]" ]
[ "0.7227106", "0.7013559", "0.6846142", "0.6814897", "0.6482821", "0.6459679", "0.644783", "0.6419121", "0.63484573", "0.6347733", "0.623671", "0.621699", "0.61936843", "0.6175315", "0.61261624", "0.612563", "0.60965395", "0.6087454", "0.60439134", "0.6013969", "0.6002376", "0.59953135", "0.5986073", "0.5978233", "0.5974903", "0.5965547", "0.59401727", "0.5932356", "0.5898605", "0.586515", "0.5840375", "0.58327264", "0.58263576", "0.58179194", "0.5816632", "0.581168", "0.5807003", "0.5806351", "0.58040786", "0.5790218", "0.5781884", "0.577966", "0.57766783", "0.5774159", "0.57581216", "0.5757473", "0.5755254", "0.57215536", "0.5713442", "0.5698055", "0.56939197", "0.56856143", "0.568304", "0.56799376", "0.5669191", "0.5665885", "0.5654791", "0.5647867", "0.5647867", "0.5647442", "0.56463945", "0.5645223", "0.5642099", "0.56410676", "0.5638208", "0.5629127", "0.56211126", "0.5617022", "0.5613804", "0.56137556", "0.5611424", "0.56077063", "0.56046927", "0.56044155", "0.55962837", "0.55935913", "0.5587857", "0.55875283", "0.5580289", "0.55800074", "0.5579037", "0.55663633", "0.55636555", "0.55621994", "0.5559905", "0.5555943", "0.5550852", "0.55501974", "0.5544628", "0.5544628", "0.55419385", "0.5539772", "0.5538515", "0.5531969", "0.5531872", "0.5531872", "0.5529121", "0.5528362", "0.55281556", "0.55242205" ]
0.5934097
27
Computes logits based on features from the model
def logits_on_features(self, h, batch): batch = batch.to(h.device) # Extract features with the model features = h.view(batch.size, -1) # Log loss logits = self.head(features) return logits
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logits(self, model, batch):\n device = list(model.parameters())[0].device\n batch = batch.to(device)\n inputs = batch.inputs\n # Extract features with the model\n features = model(*inputs)\n logits = self.logits_on_features(features, batch)\n return logits", "def logits(self, features: torch.Tensor) -> torch.Tensor:\n return self.temporal_module(features)", "def get_logits(image):\n x = image\n for filters in (32, 64):\n x = tf.layers.conv2d(x, filters, 3)\n x = tf.nn.relu(x)\n x = tf.layers.max_pooling2d(x, 3, 2)\n x = tf.reduce_mean(x, axis=(1, 2))\n logits = tf.layers.dense(x, 10)\n return logits", "def compute_logits(self):\n # [num train labels, num classes] where each row is a one-hot-encoded label.\n one_hot_train_labels = tf.one_hot(self.data.train_labels, self.way)\n\n # Undocumented in the paper, but *very important*: *only* the support set\n # embeddings is L2-normalized, which means that the distance is not exactly\n # a cosine distance. For comparison we also allow for the actual cosine\n # distance to be computed, which is controlled with the\n # `exact_cosine_distance` instance attribute.\n train_embeddings = tf.nn.l2_normalize(\n self.train_embeddings, 1, epsilon=1e-3)\n test_embeddings = self.test_embeddings\n if self.exact_cosine_distance:\n test_embeddings = tf.nn.l2_normalize(test_embeddings, 1, epsilon=1e-3)\n # [num_test_images, num_train_images]\n similarities = tf.matmul(\n test_embeddings, train_embeddings, transpose_b=True)\n attention = tf.nn.softmax(similarities)\n\n # [num_test_images, way]\n probs = tf.matmul(attention, one_hot_train_labels)\n self.test_logits = tf.log(probs)\n return self.test_logits", "def forward(self, features):\n activations = {}\n for index, layer in enumerate(self.layers):\n if index == 0:\n activations[index] = layer(features)\n else:\n activations[index] = layer(activations[index - 1])\n logits = activations[len(activations) - 1]\n return logits", "def forward(self, x):\n clf_tokens_mask = (x.transpose(0, 1).contiguous().to('cpu') == self.tokenizer.vocab['[CLS]'])\n hidden_states = self.transformer(x)\n\n lm_logits = self.lm_head(hidden_states)\n clf_tokens_states = (hidden_states * clf_tokens_mask.unsqueeze(-1).float()).sum(dim=0)\n clf_logits = self.classification_head(clf_tokens_states)\n\n return lm_logits, clf_logits", "def log_reg(x_train, y_train):\n\n log_reg_classifier = LogisticRegression(max_iter=1000, solver='lbfgs')\n log_reg_classifier.fit(x_train, y_train)\n return log_reg_classifier\n\n # log_reg_classifier.fit(x_train, y_train)", "def logistic(weights, data, targets, hyperparameters):\n\n # TODO: Finish this function\n\n return f, df, y", "def logistic_regression_model_by_features(xTrain, yTrain, features, iter_step, resolution, initial_w0, step, max_iters):\r\n\r\n model = lgm.LogisticRegressionModel(initial_w0=initial_w0,\r\n initial_weights=[0.0] * len(features))\r\n\r\n # Extend xTrains and xTest with 1 at [0]\r\n xTrain = [[1] + x for x in xTrain]\r\n\r\n for i, iters in enumerate([iter_step] * resolution):\r\n fit_tic = time.time()\r\n model.fit(xTrain, yTrain, iterations=iters, step=step)\r\n fit_toc = time.time() - fit_tic\r\n iter_cnt = iter_step * (i + 1)\r\n print(\"Took {} sec. 
Fitted data for {} iterations\".format(fit_toc, iter_cnt))\r\n\r\n return model", "def compute_edge_logits(self):", "def __call__(self,logits):\n \n #sample from Gumbel(0, 1)\n uniform = self._srng.uniform(logits.shape,low=0,high=1)\n gumbel = -T.log(-T.log(uniform + self.eps) + self.eps)\n \n #draw a sample from the Gumbel-Softmax distribution\n return T.nnet.softmax((logits + gumbel) / self.temperature)", "def forward(self, x):\n hidden_states = self.transformer(x)\n logits = self.lm_head(hidden_states)\n\n return logits", "def _get_logits(images,\n model_options,\n outputs_to_num_classes,\n weight_decay=0.0001,\n reuse=tf.AUTO_REUSE,\n is_training=False,\n fine_tune_batch_norm=False):\n features, end_points = _extract_features(\n images,\n model_options,\n weight_decay=weight_decay,\n reuse=reuse,\n is_training=is_training,\n fine_tune_batch_norm=fine_tune_batch_norm)\n\n # TODO: CHECK\n DEBUG_VARS.aspp_result = features\n if model_options.decoder_output_stride is not None:\n decoder_height = scale_dimension(model_options.crop_size[0],\n 1.0 / model_options.decoder_output_stride)\n decoder_width = scale_dimension(model_options.crop_size[1],\n 1.0 / model_options.decoder_output_stride)\n features = refine_by_decoder(\n features,\n end_points,\n decoder_height=decoder_height,\n decoder_width=decoder_width,\n decoder_use_separable_conv=model_options.decoder_use_separable_conv,\n model_variant=model_options.model_variant,\n weight_decay=weight_decay,\n reuse=reuse,\n is_training=is_training,\n fine_tune_batch_norm=fine_tune_batch_norm)\n\n outputs_to_logits = {}\n for output in sorted(outputs_to_num_classes):\n outputs_to_logits[output] = _get_branch_logits(\n features,\n outputs_to_num_classes[output],\n model_options.atrous_rates,\n aspp_with_batch_norm=model_options.aspp_with_batch_norm,\n kernel_size=model_options.logits_kernel_size,\n weight_decay=weight_decay,\n reuse=reuse,\n scope_suffix=output)\n\n return outputs_to_logits", "def dnn_logit_fn(features, mode):\n with tf.variable_scope(\n 'input_from_feature_columns',\n values=tuple(six.itervalues(features)),\n partitioner=input_layer_partitioner):\n net = tf.feature_column.input_layer(\n features=features, feature_columns=feature_columns)\n for layer_id, num_hidden_units in enumerate(hidden_units):\n with tf.variable_scope(\n 'hiddenlayer_%d' % layer_id, values=(net,)) as hidden_layer_scope:\n net = tf.layers.dense(\n net,\n units=num_hidden_units,\n activation=activation_fn,\n kernel_initializer=tf.glorot_uniform_initializer(),\n name=hidden_layer_scope)\n if dropout is not None and mode == 'train':\n net = tf.layers.dropout(net, rate=dropout, training=True)\n # _add_hidden_layer_summary(net, hidden_layer_scope.name)\n\n with tf.variable_scope('logits', values=(net,)) as logits_scope:\n logits = tf.layers.dense(\n net,\n units=units,\n activation=None,\n kernel_initializer=tf.glorot_uniform_initializer(),\n name=logits_scope)\n # _add_hidden_layer_summary(logits, logits_scope.name)\n\n return logits", "def forward(self, reps):\n assert reps.shape[-1] == N_DIMS_PER_REP\n logits = torch.zeros(len(reps), N_UNIQUE_FEATS)\n logits[:, feat] = 1\n return logits", "def logits(self):\n return np.array([m['actor'] for m in self.model_outs], dtype=np.float32)", "def forward(self, inputs=None, **kwds):\n\n h = inputs\n h = self.feat_drop(h)\n\n for l in range(self.num_layers-1):\n\n h = self.layers[l](h)\n h = self.activation(h)\n \n logits = self.layers[-1](h)\n\n return logits", "def infer_ensemble_logits(features, model, checkpoints, 
session, num_steps,\n data):\n _, inferred = model.multi_gpu([features], 1)\n logits = []\n saver = tf.train.Saver()\n for checkpoint in checkpoints:\n saver.restore(session, checkpoint)\n for i in range(num_steps):\n logits.append(\n session.run(\n inferred[0].logits,\n feed_dict={\n features['recons_label']: data[i]['recons_label'],\n features['labels']: data[i]['labels'],\n features['images']: data[i]['images'],\n features['recons_image']: data[i]['recons_image']\n }))\n return logits", "def get_logCRF(train, model):\n word = train[0]\n Y = train[1]\n char_count, _ = word.shape\n # calculating forward messages\n alpha = np.zeros((char_count, model.dimY))\n first_term = np.dot(word, model.getW(model.labels))\n second_term = model._T\n for i in range(1, char_count):\n sum_term = (first_term[i-1] + alpha[i-1]) + second_term\n alpha[i] = np.apply_along_axis(logsumexp_trick, 1, sum_term) \n # getting logZ from messages\n logZ = logsumexp_trick(first_term[char_count-1]+alpha[char_count-1])\n w_term = np.sum(model.getW(Y).transpose() * word) # $\\sum_{j=1}^m {W_{yj} . x_j}$\n t_term = np.sum(model.getT(Y[:-1], Y[1:])) #$T_{yj, yj+1}\n value = -logZ + w_term + t_term\n return value", "def train(self, documents):\n prior_log_prob, label_to_col = self.get_prior_log_probabilities(documents)\n self.my_model[\"vocabulary\"] = make_vocabulary(documents)\n\n # find frequencies of features\n num_classes = len(label_to_col)\n num_features = len(self.extract_f_vector(documents[0]))\n features_freq = np.zeros((num_features, num_classes))\n for doc in documents:\n f_vector = self.extract_f_vector(doc)\n col_for_f_vector = label_to_col[doc.label]\n features_freq[:, col_for_f_vector] += f_vector\n\n # laplace smoothing\n total_per_label = np.sum(features_freq, axis=0)\n features_freq += np.ones(total_per_label.shape, int)\n normalizer = total_per_label + np.full(total_per_label.shape, num_features, int)\n features_freq /= normalizer\n\n # stack all probabilities to one matrix and take log\n # result: self.all_log_prob\n # |-----------------------------------|\n # | log P(f1|C1) | ... | log P(f1|Cn) |\n # | log P(f2|C1) | ... | log P(f2|Cn) |\n # | . | . | . |\n # | . | . | . |\n # | . | . | . |\n # | log P(fm|C1) | ... | log P(fm|Cn) |\n # | log P(C1) | ... 
| log P(Cn) |\n # |-----------------------------------|\n likelihood_log_prob = np.log(features_freq)\n all_log_prob = np.vstack((likelihood_log_prob, prior_log_prob))\n self.my_model[\"all_log_prob\"] = all_log_prob", "def compute_logits(self):\n # [num test images, 1, embedding size].\n test_embeddings = tf.expand_dims(self.test_embeddings, 1)\n\n # [1, num_clases, embedding_size].\n prototypes = tf.expand_dims(self.prototypes, 0)\n\n # Squared euclidean distances between each test embedding / prototype pair.\n distances = tf.reduce_sum(tf.square(test_embeddings - prototypes), 2)\n self.test_logits = -distances\n return self.test_logits", "def train_logistic_regression(train_exs: List[SentimentExample], feat_extractor: FeatureExtractor) -> LogisticRegressionClassifier:\n lr = LogisticRegressionClassifier(feat_extractor.corpus_length, feat_extractor)\n alpha = 1e0\n # beta = 1e-4\n for epoch in range(8):\n loss = 0.\n acc = 0\n indices = np.arange(len(train_exs))\n np.random.shuffle(indices)\n for i in indices:\n feat = feat_extractor.feats[i]\n sentimentExample = train_exs[i]\n y = sentimentExample.label\n z = 1 / (1 + np.exp(-feat.dot(np.expand_dims(lr.w, axis=1))))[0, 0]\n loss += -y * np.log(z) - (1 - y) * np.log(1 - z) \\\n # + beta * np.expand_dims(lr.w, axis=0).dot(np.expand_dims(lr.w, axis=1))[0, 0]\n predict = int(feat.dot(np.expand_dims(lr.w, axis=1))[0, 0] > 0)\n acc += (predict == y)\n grad = (z - y) * feat.toarray()[0] # + 2 * beta * lr.w\n lr.w = lr.w - alpha * grad\n print(\"epoch {:d}, loss: {:f}, accuracy: {:f}\".format(epoch, loss / len(train_exs), acc / len(train_exs)))\n\n for i in indices:\n feat = feat_extractor.feats[i]\n sentimentExample = train_exs[i]\n y = sentimentExample.label\n z = 1 / (1 + np.exp(-feat.dot(np.expand_dims(lr.w, axis=1))))[0, 0]\n loss += -y * np.log(z) - (1 - y) * np.log(1 - z)\n print(\"training loss: {:f}\".format(loss / len(train_exs)))\n\n return lr", "def forward(self, x):\n\n embeds = self.dvector(x)\n logits = self.linear(embeds)\n\n return logits", "def forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n logits = torch.softmax(x, dim=1)\n return logits", "def build_linear_model(inputs, columns, config):\r\n features = inputs['features']\r\n\r\n cols_to_vars = {}\r\n units = int(config['linear_model'].get('units', 1))\r\n combiner = config['linear_model'].get('combiner', 'sum')\r\n linear_logits = tf.feature_column.linear_model(\r\n features=features,\r\n feature_columns=columns,\r\n units=units,\r\n sparse_combiner=combiner,\r\n cols_to_vars=cols_to_vars)\r\n\r\n return linear_logits", "def logistic(weights, data, targets, hyperparameters):\n \n t = np.transpose(np.repeat(np.reshape(weights[:-1], (len(weights)-1, 1)), len(data), axis = 1))\n f_e = data * t\n z_sums = np.sum(f_e, axis=1)\n y = sigmoid(z_sums +weights[-1])\n f = np.sum(np.log(1 + np.exp(-z_sums - weights[-1])) + (1 - np.transpose(targets)) * (z_sums + weights[-1]))\n df = np.sum(data * np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0)\n df = np.append(df, np.sum(np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0))\n df = np.reshape(df, ((len(df), 1)))\n\n return f, df, np.reshape(y, (len(y), 1))", "def estimate_logreg(x,y,N_its,learning_rate=1e-4,regularizer=1e-2,lazy_reg=True):\n weights = defaultdict(float)\n weight_hist = [] #keep a history of the weights after 
each iteration\n all_labels = set(y)\n \n # this block is for lazy regularization\n ratereg = learning_rate * regularizer\n def regularize(base_feats):\n for base_feat in base_feats:\n for label in all_labels:\n #print \"regularizing\",(label,base_feat),t,last_update[base_feat],(1. - ratereg) ** (t-last_update[base_feat])\n weights[(label,base_feat)] *= (1. - ratereg) ** (t-last_update[base_feat])\n last_update[base_feat] = t\n\n t = 0\n last_update = defaultdict(int)\n\n eeta = learning_rate\n\n for it in xrange(N_its):\n\n for i,(x_i,y_i) in enumerate(zip(x,y)): #keep\n t += 1\n\n # regularization\n if lazy_reg: # lazy regularization is essential for speed\n regularize(x_i) # only regularize features in this instance\n if not lazy_reg: # for testing/explanatory purposes only\n for feat,weight in weights.iteritems():\n if feat[1] is not OFFSET: # usually don't regularize offset\n weights[feat] -= ratereg * weight\n\n p_y = compute_py(x_i,weights,all_labels) #hint\n\n term2 = make_feature_vector(x_i, y_i)\n\n for key in term2.keys():\n weights[key] = weights[key] + (term2[key]*eeta)\n\n for label in all_labels:\n temp = make_feature_vector(x_i, label)\n for key in temp.keys():\n weights[key] = weights[key] - (temp[key]*eeta*p_y[label])\n\n\n print it,\n weight_hist.append(weights.copy()) \n\n # if lazy, let regularizer catch up\n if lazy_reg:\n # iterate over base features\n regularize(list(set([f[1] for f in weights.keys() if f[1] is not OFFSET])))\n\n return weights,weight_hist", "def logreg(mode, vectorizer, training_dir):\n # 1. load the training dataset\n NORMALIZE = True\n pre_load = True\n\n logging.basicConfig(level=logging.INFO)\n logging.info(\"loading training dataset\")\n if not pre_load:\n x, y_age, y_gender, y_occ, cid = \\\n load_dataset(training_dir, mode, vectorizer)\n\n x_train = x[0:TRAIN_COUNT, :]\n\n y_train_age = y_age[0:TRAIN_COUNT]\n y_train_gender = y_gender[0:TRAIN_COUNT]\n y_train_occ = y_occ[0:TRAIN_COUNT]\n\n x_test = x[TRAIN_COUNT:TRAIN_COUNT+TEST_COUNT, :]\n y_test_age = y_age[TRAIN_COUNT:]\n y_test_gender = y_gender[TRAIN_COUNT:]\n y_test_occ = y_occ[TRAIN_COUNT:]\n cid = cid[TRAIN_COUNT:]\n\n if NORMALIZE:\n x_train = normalize(x_train, axis=1, norm='l1')\n x_test = normalize(x_test, axis=1, norm='l1')\n\n data_path = 'data/loaded_data.npz'\n with open(data_path, 'wb') as f:\n pickle.dump([x_train, y_train_age, y_train_gender, y_train_occ, x_test, y_test_age, y_test_gender, y_test_occ, cid], f)\n\n else:\n data_path = 'data/loaded_data.npz'\n if os.path.isfile(data_path):\n with open(data_path, 'rb') as f:\n x_train, y_train_age, y_train_gender, y_train_occ, x_test, y_test_age, y_test_gender, y_test_occ, cid = pickle.load(f)\n # exit()\n # 2. 
train models\n y_train_age = [x if isinstance(x, int) else 0 for x in y_train_age]\n y_test_age = [x if isinstance(x, int) else 0 for x in y_test_age]\n logging.info(\"fitting model age\")\n # age_model = LogisticRegression(multi_class='multinomial', solver=\"newton-cg\")\n # age_model = SVC()\n # age_model = DecisionTreeClassifier()\n age_model = RandomForestClassifier(n_estimators=15)\n # age_model = MultinomialNB()\n age_model.fit(x_train, y_train_age)\n logging.info(\"fitting model gender\")\n # gender_model = LogisticRegression(multi_class='multinomial', solver=\"newton-cg\")\n # gender_model = SVC(verbose=True, C=10, class_weight={0: 10, 1:1})\n # gender_model = DecisionTreeClassifier()\n gender_model = RandomForestClassifier(n_estimators=15)\n # gender_model = MultinomialNB()\n gender_model.fit(x_train, y_train_gender)\n logging.info(\"fitting model acc\")\n # occ_model = LogisticRegression(multi_class='multinomial', solver=\"newton-cg\")\n # occ_model = SVC(verbose=True)\n # occ_model = DecisionTreeClassifier()\n occ_model = RandomForestClassifier(n_estimators=15)\n # occ_model = MultinomialNB()\n occ_model.fit(x_train, y_train_occ)\n\n # 3. load the test dataset\n logging.info(\"loading test dataset ...\")\n # x_test, y_test_age, y_test_gender, y_test_occ, cid = \\\n # load_dataset(test_dir, mode, vectorizer)\n\n # 4. Predict and Evaluate\n logging.info(\"predicting\")\n age_pred = age_model.predict(x_test)\n gender_pred = gender_model.predict(x_test)\n occ_pred = occ_model.predict(x_test)\n\n # gender_pred = gender_model.predict(x_train)\n # occ_pred = occ_model.predict(x_train)\n output_labels = [{\"id\": i, \"occupation\": inv_o_dict[o], \"gender\": inv_g_dict[g], \"birthyear\": int(a) }\n for i, o, g, a in zip(cid, occ_pred, gender_pred, age_pred)]\n # output_labels = [{\"id\": i, \"gender\": inv_g_dict[g], \"occupation\": inv_o_dict[o]}\n # for i, g, o in zip(cid, gender_pred, occ_pred)]\n\n if not os.path.isdir('./results'):\n os.makedirs('./results')\n\n open(\"./results/all-predictions.ndjson\", \"w\").writelines(\n [json.dumps(x) + \"\\n\" for x in output_labels]\n )\n\n pred_dict = {\"prediction\": output_labels[0:10]}\n with open('./results/pred.json', 'w') as outfile:\n json.dump(pred_dict, outfile)\n\n gt_labels = [{\"id\": i, \"occupation\": inv_o_dict[o], \"gender\": inv_g_dict[g], \"birthyear\": int(a) }\n for i, o, g, a in zip(cid, y_test_occ, y_test_gender, y_test_age)]\n gt_dict = {\"ground_truth\": gt_labels[0:10]}\n with open('./results/gt.json', 'w') as outfile:\n json.dump(gt_dict, outfile)\n\n # saving trained models\n if not os.path.isdir(\"./pretrained-models\"):\n os.makedirs(\"./pretrained-models\")\n\n pickle.dump(age_model, open(\"./pretrained-models/age-model\", 'wb'))\n pickle.dump(gender_model, open(\"./pretrained-models/gender-model\", 'wb'))\n pickle.dump(occ_model, open(\"./pretrained-models/occ-model\", 'wb'))\n\n print(\"Accuracy for age model: {:.2f}%\".format(accuracy_score(age_pred, y_test_age) * 100.0))\n\n print(\"Accuracy for gender model: {:.2f}%\".format(accuracy_score(gender_pred, y_test_gender) * 100.0))\n\n print(\"Accuracy for occupation model: {:.2f}%\".format(accuracy_score(occ_pred, y_test_occ) * 100.0))", "def forward(self, x):\n return F.log_softmax(self.proj(x), dim=-1)", "def forward(self, logits, temperature):\n flat = logits.view(logits.shape[:-2] + (-1,))\n weights = F.softmax(flat / temperature, dim=-1).view_as(logits)\n\n x = (weights.sum(-2) * torch.linspace(-1, 1, logits.shape[-1]).type_as(logits)).sum(-1)\n y = 
(weights.sum(-1) * torch.linspace(-1, 1, logits.shape[-2]).type_as(logits)).sum(-1)\n\n return torch.stack((x, y), -1), weights", "def forward(self, X, labels):\n features = self.get_conv_feats(X)\n W = self.W\n T = self.T\n log_prob = CRFautograd.apply(W, T, features, labels)\n return log_prob", "def logistic(weights, data, targets, hyperparameters):\n y = logistic_predict(weights, data)\n\n #####################################################################\n # TODO: #\n # Given weights and data, return the averaged loss over all data #\n # points, gradient of parameters, and the probabilities given by #\n # logistic regression. #\n #####################################################################\n f = None\n df = None\n\n f = evaluate(targets, y)[0]\n\n N = len(data)\n M = len(weights) - 1 \n temp = np.ones([N, M + 1])\n temp[: N, : M] = np.array(data)\n\n\n df = np.zeros([M+1, 1])\n\n df[:, 0] = np.array([[np.mean([(y.flatten()[i] - targets.flatten()[i]) * temp[i][j] for i in range(0, N)]) for j in range(0, M + 1)],])\n\n # df = np.matrix([[np.mean([(y[i] - targets[i]) * temp[i][j] for i in range(0, N)]) for j in range(0, M + 1)],])\n\n #####################################################################\n # END OF YOUR CODE #\n #####################################################################\n return f, df, y", "def logit_features(data, columns, upper_bound=1):\n for col in columns:\n\n if upper_bound != 1:\n print('Rescaling data...')\n data[col] = data[col] / upper_bound\n\n # deal with 0/1 values\n if np.sum(data[col].isin([0, 1])) > 0:\n print('Replacing 0s with 0.025, 1s with 0.925...')\n data.loc[data[col] == 0, col] = 0.025\n data.loc[data[col] == 1, col] = 0.925\n\n data[col] = sp.special.logit(data[col])", "def train_logistic_regression(x_train, y_train, learning_rate, fit_intercept=False, max_iter=500):\r\n if fit_intercept:\r\n intercept = np.ones(x_train.shape[0], 1)\r\n x_train = np.hstack((intercept, x_train)) # hstacks merges 2 arrays column wise\r\n weights = np.zeros(x_train.shape[1])\r\n for iteration in range(max_iter):\r\n weights = update_weights(x_train, y_train, weights, learning_rate)\r\n # printing cost for every 100 iterations\r\n if iteration % 100 == 0:\r\n print(calculate_cost(x_train, y_train, weights))\r\n return weights", "def call(self,\n points: tf.Tensor,\n training: Optional[bool] = None) -> tf.Tensor: # pylint: disable=arguments-differ\n features = self.encoder(points, training) # (B,1024)\n logits = self.classifier(features, training) # (B,num_classes)\n return logits", "def inference(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n # Shapes of layers\n W_shapes = [self.input_dim] + self.n_hidden + [self.n_classes]\n W_shapes = [(W_shapes[i], W_shapes[i + 1]) for i in range(len(W_shapes) - 1)]\n\n Z = x\n for layer_num, shape in enumerate(W_shapes):\n layer_name = 'dense_{}'.format(layer_num)\n Z = self._dense_layer(inputs=Z, W_shape=shape, scope_name=layer_name)\n\n logits = Z\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n return logits", "def add_logits_op(self):\n with tf.variable_scope(\"bi-lstm\"):\n cell_fw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_lstm)\n cell_bw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_lstm)\n (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, self.word_embeddings,\n sequence_length=self.sequence_lengths, dtype=tf.float32)\n output = tf.concat([output_fw, output_bw], axis=-1)\n output 
= tf.nn.dropout(output, self.dropout)\n\n with tf.variable_scope(\"proj\"):\n W = tf.get_variable(\"W\", dtype=tf.float32,\n shape=[2*self.config.hidden_size_lstm, self.config.ntags])\n\n b = tf.get_variable(\"b\", shape=[self.config.ntags],\n dtype=tf.float32, initializer=tf.zeros_initializer())\n\n nsteps = tf.shape(output)[1]\n output = tf.reshape(output, [-1, 2*self.config.hidden_size_lstm])\n pred = tf.matmul(output, W) + b\n self.logits = tf.reshape(pred, [-1, nsteps, self.config.ntags])", "def logreg(mode, vectorizer, training_dir, test_dir):\n # 1. load the training dataset\n logging.basicConfig(level=logging.INFO)\n logging.info(\"loading training dataset\")\n x_train, y_train_age, y_train_gender, y_train_occ, _ = \\\n load_dataset(training_dir, mode, vectorizer)\n\n # 2. train models\n logging.info(\"fitting model age\")\n age_model = LogisticRegression(multi_class='multinomial', solver=\"newton-cg\")\n age_model.fit(x_train, y_train_age)\n logging.info(\"fitting model gender\")\n gender_model = LogisticRegression(multi_class='multinomial', solver=\"newton-cg\")\n gender_model.fit(x_train, y_train_gender)\n logging.info(\"fitting model acc\")\n occ_model = LogisticRegression(multi_class='multinomial', solver=\"newton-cg\")\n occ_model.fit(x_train, y_train_occ)\n\n # 3. load the test dataset\n logging.info(\"loading test dataset ...\")\n x_test, y_test_age, y_test_gender, y_test_occ, cid = \\\n load_dataset(test_dir, mode, vectorizer)\n\n # 4. Predict and Evaluate\n logging.info(\"predicting\")\n age_pred = age_model.predict(x_test)\n gender_pred = gender_model.predict(x_test)\n occ_pred = occ_model.predict(x_test)\n output_labels = [{\"id\": i, \"birthyear\": int(a), \"gender\": inv_g_dict[g], \"occupation\": inv_o_dict[o]}\n for i, a, g, o in zip(cid, age_pred, gender_pred, occ_pred)]\n\n open(\"labels.ndjson\", \"w\").writelines(\n [json.dumps(x) + \"\\n\" for x in output_labels]\n )", "def logits_placeholder(self):", "def logistic(weights, data, targets, hyperparameters):\n # TODO: Finish this function\n n_data = len(data)\n dim_data = len(data[0])\n\n f = 0\n y = logistic_predict(weights, data)\n\n data = mod_data(data)\n\n # dl/dw_j = SUM(x_ij * (t_i - (1 - sigmoid(z))))\n df = np.dot(data.T, (1.0 * targets) - (1 - y))\n\n # to calculate f, we need to sum the negative log of all y iff target is 0 and (1-y) iff target is 1\n f = -1.0 * np.dot(targets.T, np.log(1 - y)) - 1.0 * np.dot(1 - targets.T, np.log(y))\n\n # calculate P(C=0|x_i) for all x_i \n return f[0,0], df, y", "def predict_logit(self, x):\n with torch.no_grad():\n y_ = self.tr_model(x)\n return y_", "def forward(self, inputs):\n if len(inputs) < self.config.train.batch_size:\n x = torch.zeros(self.config.train.batch_size, dtype=torch.long, requires_grad=False)\n x[:len(inputs)] = inputs\n inputs = x\n print(\"Padded:\", inputs)\n # print(\"input idx shp:\", inputs.shape)\n # print(\"embeddings shp:\", self.embeddings.shape)\n input_tokens = torch.LongTensor(self.embeddings[inputs, :])\n input_mask = torch.LongTensor(self.masks[inputs, :])\n # print(\"input tokens shp:\", input_tokens.shape)\n logits = self.model(input_tokens, attention_mask=input_mask)[0]\n # print(logits)\n # print(\"Check https://mc.ai/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic/ for potential training required stuff\")\n return logits", "def classifier(model):\n \n model.classifier = nn.Sequential(OrderedDict([\n ('fc1', nn.Linear(model.classifier[0].in_features, 4096)),\n ('fc2', 
nn.Linear(4096, 102)),\n ('relu', nn.ReLU()),\n ('output', nn.LogSoftmax(dim=1))\n ]))\n return model", "def predict_on_features(self, h):\n logits = self.head(h.view(h.size(0), -1))\n log_probs = F.log_softmax(logits, dim=-1)\n return log_probs, logits.argmax(dim=-1)", "def logistic(self,w,Xi):\n # print(w.T)\n # print(Xi)\n a = np.dot(w.T,Xi)\n return 1/(1+np.exp(-a))", "def run_logistic_regression(training, testing, feature_cols, outcome_col):\n if 'intercept' not in training.columns:\n training['intercept'] = 1\n if 'intercept' not in testing.columns:\n testing['intercept'] = 1\n intercept_feature_cols = feature_cols + ['intercept']\n logit = sm.Logit(training[outcome_col], training[intercept_feature_cols])\n fitted_logit_model = logit.fit()\n logit_diagnostics = get_diagnostics(testing[outcome_col], testing[intercept_feature_cols], fitted_logit_model, model_type = 'logit')\n predicted_logit_probs = fitted_logit_model.predict(testing[intercept_feature_cols])\n\n return fitted_logit_model, logit_diagnostics, predicted_logit_probs", "def logistic_regression(X, y, fold_number=10, iteration=1000):\n \n # add additional dimension and set y=-1 if y==0\n X['x0'] = 1\n y[y==0] = -1\n \n # data preparation\n D = X.shape[1]\n fold = KFold(n_splits=fold_number)\n eta = 0.01 / 4600\n \n # record 10 output\n loss_function_list = []\n w_list = []\n \n for train_index, test_index in fold.split(X, y):\n X_train, X_test = X.iloc[train_index], X.iloc[test_index]\n y_train, y_test = y.iloc[train_index], y.iloc[test_index]\n length = X_train.shape[0]\n w = np.zeros(D) # initialize w\n loss_function = []\n for ite in range(iteration+1): \n gradient = sum((1-expit(y_train.values[i]*X_train.values[i].dot(w)))*y_train.values[i]*X_train.values[i] for i in range(length))\n loss_function.append(sum(np.log(expit(y_train.values[i]*X_train.values[i].dot(w))) for i in range(length)))\n w += eta * gradient\n w_list.append(w)\n loss_function_list.append(loss_function)\n \n return w_list, loss_function_list", "def logistic_train_metrics(df):\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=UserWarning)\n model_reg = dill.load(open('maa_conflict_model.dill', 'rb'))\n\n return model_reg", "def forward(self, t):\n x = self.embeddings(t)\n logits = self.model(x.view(x.shape[0], -1))\n return logits", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n\tif len(initial_w.shape)==2:\n\t\tinitial_w = initial_w.reshape((max(initial_w.shape)))\n\tif len(y.shape)==2:\n\t\ty = y.reshape((max(y.shape)))\n\n\tw = logistic_regression_gradient_descent(y, tx, initial_w, max_iters, gamma)\n\t\n\tloss = calculate_nll(y, tx, w)\n\n\treturn w, loss", "def fit(self, x, y):\n # Note Logistic Regression Runtime\n start_time = time.time()\n\n # Converting Pandas DataFrame to Numpy arrays\n if not type(x).__module__ == np.__name__:\n x = x.to_numpy()\n if not type(y).__module__ == np.__name__:\n y = y.to_numpy()\n\n # Insert a column of 1 in the feature vector X for the bias term in the weights\n x = np.insert(x,0,1,axis=1)\n \n # Verify dimension of input\n if len(x) != len(y):\n print(\"The number of input features vector must be to be the same as the number of target variables\")\n else:\n losses = self.gradient_descent(x,y)\n\n # Note end time\n end_time = time.time()\n\n # Log runtime\n print(\"Logistic Regression training time: {0:.2f}s\".format(end_time - start_time))\n \n return losses", "def forward(self, x):\n for layer in self.hidden_layers:\n x = F.relu(layer(x))\n x = self.dropout(x)\n x 
= self.output(x)\n\n return F.log_softmax(x, dim=1)", "def sklearn_model(train_data):\n X, y = train_data\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n model = LogisticRegression(\n multi_class=\"multinomial\", solver=\"lbfgs\", max_iter=1000\n )\n model.fit(X, y)\n return model", "def forward(self, x):\n \n x = F.relu(self.conv1_bn(self.conv1(self.conv0_bn(x))))\n x = F.relu(self.conv2_bn(self.conv2(x)))\n x = F.relu(self.conv3_bn(self.conv3( self.maxpool2(x))))\n x = F.relu(self.conv4_bn(self.conv4( self.maxpool3(x))))\n x = self.maxpool4(x) \n x = x.view(-1, 1184)\n x = F.relu(self.fc1(x))\n x = self.dense1_bn(x)\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x)", "def fit_logistic_regression():\n\n logger.debug(\"Running the fit_logistic_regression function now\")\n\n #Loading the configuration\n with open(os.path.join(\"config\",\"config.yml\"), \"r\") as f:\n config = yaml.safe_load(f)\n\n #Loading and pre processing the data\n logger.debug(\"Loading and pre processing the data\")\n train_df = load_data(config[\"load_data\"][\"train_file\"])\n train_df = pre_process_data(train_df, resample = True, resample_count = 500000)\n\n #Defining Pipeline\n pipeline = Pipeline([\n ('tfidf', TfidfVectorizer(analyzer='word', token_pattern=r'[A-Za-z0-9@-]+')),\n ('model', LogisticRegression(random_state=12345, verbose = 1, solver = 'saga')),\n ])\n\n #Defining parameters to vary\n parameters = {\n 'tfidf__max_df': (0.25, 0.5, 0.75),\n 'tfidf__max_features': (None, 5000, 10000, 50000),\n 'tfidf__ngram_range': ((1, 1), (1, 2)),\n 'model__C': (0.01, 1, 100)\n }\n\n scoring_list = [\"accuracy\", \"f1\", \"precision\", \"recall\", \"roc_auc\"]\n \n #Performing 5fold CV to determine best hyperparameters\n model = GridSearchCV(pipeline, parameters, cv=5,\n n_jobs=-1, verbose=1, scoring=scoring_list, refit='f1',)\n\n t0 = datetime.datetime.now()\n\n model.fit(train_df[\"Review\"].tolist(), train_df[\"Ratings\"].to_numpy())\n \n logger.info(\"Grid Search performed in {}\".format(str(datetime.datetime.now()-t0)))\n\n #Saving results\n res_df = pd.DataFrame(model.cv_results_)\n res_df.to_csv(os.path.join(config[\"summary_stats\"][\"save_location\"], \"LogisticRegressionResults.csv\"))\n \n #Saving the model\n pickle.dump(model, open(os.path.join(config[\"models\"][\"save_location\"], \"LogisticRegression.pkl\"),'wb'))\n\n return", "def log_forward_computations(self, x: list): \n n_x = len(x)\n \n # log_f_x initialized to -Inf because log(0) = -Inf\n log_f_x = np.zeros((self.n_states, n_x)) + logzero()\n x_emission_scores = np.array([self.scores['emission'][:, self.word_to_pos[w] if w in list(self.word_to_pos.keys()) else self.word_to_pos['UnknownWord']] for w in x]).T\n \n log_f_x[:,0] = x_emission_scores[:, 0] + self.scores['initial']\n \n for i in range(1,n_x):\n for s in range(self.n_states):\n log_f_x[s,i] = logsumexp(self.scores['transition'][s,:] + \n log_f_x[:,i-1]) + x_emission_scores[s, i]\n\n \n log_likelihood = logsumexp(self.scores['final'] + log_f_x[:,-1])\n \n return log_f_x, log_likelihood", "def train_logisticRegression(data: np.array, labels: np.array)->None:\n\n n_examples = np.size(data, 0)\n n_features = np.size(data, 1)\n n_categories = np.size(labels, 1)\n\n data = np.hstack((np.ones((n_examples, 1)), data))\n\n print(data[0:5, :])\n\n X_train, X_test, y_train, y_test, idx_test = split_data(data, labels, 0.7)\n\n convergence_goal = 1e-3\n learning_rate = 0.01\n\n theta = np.random.uniform(size=((n_features+1, 
n_categories)))\n\n for i in range(n_categories):\n\n cost_var = 1\n\n previous_cost = 1e6\n iterations = 0\n cost_to_plot = []\n\n while cost_var > convergence_goal:\n iterations += 1\n cost, grad = costFunction(X_train, y_train[:, i], theta[:, i])\n theta[:, i] = update_theta(theta[:, i], grad, learning_rate)\n cost_var = previous_cost - cost\n previous_cost = cost\n if iterations == 1: cost_var = 1\n cost_to_plot.append(cost)\n # print(cost)\n\n plt.plot(range(iterations), cost_to_plot, 'g-', label = 'cost')\n plt.xlabel('iterations')\n plt.ylabel('cost')\n # plt.show()\n\n predictions = lrPredict(theta, X_test)\n\n print(predictions[0:5, :])\n print(y_test[0:5, :])\n\n accuracy = np.mean([p == l for p, l in zip(predictions, y_test)])\n print(\"Accuracy = {}\".format(accuracy))\n\n pass", "def train_model(self):\r\n alpha, accuracy_rate = self.select_model()\r\n # Initialize logistic regression with alpha(learning rate)\r\n lg = logisticregression(C=alpha)\r\n # Train the model.\r\n lg.fit(self.training_data, self.target_data)\r\n # Save the trained model as .pkl file.\r\n joblib.dump(value=lg, filename=self.intention_id+'.pkl', compress=1)\r\n print \"Estimated Parameters of Logistic Regression\"\r\n # Estimated parameters of logistic regression.\r\n print lg.get_params()", "def forward(self, x):\n out = self.net(x)\n out = self.avg(out)\n out = out.view(out.size(0), -1)\n out = self.fc1(out)\n\n return func.log_softmax(out, dim=-1)", "def extract_features(self, inputs):\n x = self.conv1(inputs)\n x = self.maxpool1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.maxpool2(x)\n\n x = self.inception3a(x)\n x = self.inception3b(x)\n x = self.maxpool3(x)\n x = self.inception4a(x)\n\n x = self.inception4b(x)\n x = self.inception4c(x)\n x = self.inception4d(x)\n\n x = self.inception4e(x)\n x = self.maxpool4(x)\n x = self.inception5a(x)\n x = self.inception5b(x)\n x = self.avgpool(x)\n x = torch.flatten(x,1)\n x = self.dropout(x)\n return x", "def __init__(self, reg_penalty='l2', reg_inv=1.0, k_fold=5, random_state=0):\n print(\"Initialize model Logistic Regression\")\n self.reg_penalty = reg_penalty\n self.reg_inv = reg_inv\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.linear_model.LogisticRegression(penalty=self.reg_penalty,\n C=self.reg_inv,\n max_iter=1000, \n random_state=self.random_state)", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n \n # Define parameters to store w and loss\n ws = [initial_w]\n losses = []\n w = initial_w\n y = (y + 1) / 2 # [-1, 1] -> [0, 1]\n for n_iter in range(max_iters):\n # computes gradient and loss\n\n grad = compute_gradient_log(y, tx, w)\n loss = compute_loss_log(y, tx, w)\n\n #updates w\n\n w = w - gamma * grad\n # store w and loss\n\n ws.append(w)\n losses.append(loss)\n #print(\"logistic regression: Gradient Descent({bi}/{ti}): loss={l}\".format(\n # bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]), end=\"\\r\")\n \n return w, loss", "def information_gain(features, attribute_index, targets):\r\n\r\n possible_feature_values = [0,1]\r\n \r\n possible_classifications = [0,1]\r\n \r\n feature = features[:,attribute_index]\r\n \r\n \r\n number_of_samples = len(feature)\r\n \r\n import math\r\n \r\n \r\n #current_entropy = np.sum([-(len(targets[targets==possible_classification])/number_of_samples)*math.log(len(targets[targets==possible_classification])/number_of_samples, 2) for possible_classification in possible_classifications])\r\n \r\n terms_to_be_summed_for_current_entropy = []\r\n \r\n for 
classification in possible_classifications:\r\n \r\n number_of_elements_with_this_classification = len(targets[targets==classification])\r\n \r\n p_for_this_classification = number_of_elements_with_this_classification/len(targets)\r\n \r\n if p_for_this_classification != 0:\r\n terms_to_be_summed_for_current_entropy.append(-p_for_this_classification*math.log(p_for_this_classification,2))\r\n else:\r\n terms_to_be_summed_for_current_entropy.append(0)\r\n \r\n current_entropy = np.sum(terms_to_be_summed_for_current_entropy)\r\n \r\n \r\n \r\n terms_to_be_summed_for_weighted_entropy = []\r\n \r\n for possible_value in possible_feature_values:\r\n \r\n targets_split_by_feature_value = targets[feature.flatten() == possible_value]\r\n \r\n if len(targets_split_by_feature_value) != 0:\r\n \r\n \r\n weight_of_feature_value = len(targets_split_by_feature_value)/len(targets)\r\n \r\n terms_for_entropy_within_subset = []\r\n \r\n for classification in possible_classifications:\r\n \r\n number_of_subset_elements_with_this_classification = len(targets_split_by_feature_value[targets_split_by_feature_value==classification])\r\n \r\n p_in_subset_for_this_classification = number_of_subset_elements_with_this_classification/len(targets_split_by_feature_value)\r\n \r\n if p_in_subset_for_this_classification != 0:\r\n terms_for_entropy_within_subset.append(-p_in_subset_for_this_classification*math.log(p_in_subset_for_this_classification,2))\r\n else:\r\n terms_for_entropy_within_subset.append(0)\r\n \r\n entropy_within_subset = np.sum(terms_for_entropy_within_subset)\r\n \r\n terms_to_be_summed_for_weighted_entropy.append(weight_of_feature_value*entropy_within_subset)\r\n \r\n weighted_entropy = np.sum(terms_to_be_summed_for_weighted_entropy)\r\n \r\n \r\n #current_entropy = np.sum(terms_to_be_summed_for_current_entropy)\r\n \r\n #weighted_entropy = np.sum([(len(feature[feature==possible_value])/number_of_samples)*(len(targets[feature==possible_value][targets[feature==possible_value]==possible_classification])/len(targets[feature==possible_value]))*math.log((len(targets[feature==possible_value][targets[feature==possible_value]==possible_classification])/len(targets[feature==possible_value])), 2) for possible_classification in possible_classifications for possible_value in possible_feature_values])\r\n\r\n information_gain = current_entropy - weighted_entropy \r\n \r\n return information_gain", "def ICA_log_likelihood(X, model):\n\n # TODO: YOUR CODE HERE", "def forward(self, x):\n x = self.feature_extractor(x)\n batch_size, hidden = x.size()\n\n x = self.layer_1(x)\n x = torch.relu(x)\n x = self.layer_2(x)\n x = torch.relu(x)\n x = self.layer_3(x)\n\n x = torch.log_softmax(x, dim=1)\n return x", "def call_features(self, inputs):\n result = self.embedding(inputs)\n inception = []\n for conv, pool, flat in zip(\n self.convolutions, self.pooling, self.flatten\n ):\n tmp = conv(result)\n tmp = pool(tmp)\n tmp = flat(tmp)\n inception.append(tmp)\n result = self.concat(inception)\n result = self.dense1(result)\n result = self.dropout1(result)\n result = self.dense2(result)\n return result", "def forward(self, logits, labels, loss_type='softmax'):\n # self.epsilon = 0.1 #labelsmooth\n beta = self.beta\n gamma = self.gamma\n\n no_of_classes = logits.shape[1]\n samples_per_cls = torch.Tensor(\n [sum(labels == i) for i in range(logits.shape[1])])\n if torch.cuda.is_available():\n samples_per_cls = samples_per_cls.cuda()\n\n effective_num = 1.0 - torch.pow(beta, samples_per_cls)\n weights = (1.0 - beta) / 
((effective_num) + 1e-8)\n\n weights = weights / torch.sum(weights) * no_of_classes\n labels = labels.reshape(-1, 1)\n\n weights = torch.tensor(weights.clone().detach()).float()\n\n if torch.cuda.is_available():\n weights = weights.cuda()\n labels_one_hot = torch.zeros(\n len(labels), no_of_classes).cuda().scatter_(1, labels, 1).cuda()\n\n labels_one_hot = (1 - self.epsilon) * labels_one_hot + \\\n self.epsilon / no_of_classes\n weights = weights.unsqueeze(0)\n weights = weights.repeat(labels_one_hot.shape[0], 1) * labels_one_hot\n weights = weights.sum(1)\n weights = weights.unsqueeze(1)\n weights = weights.repeat(1, no_of_classes)\n\n if loss_type == \"focal\":\n cb_loss = focal_loss(labels_one_hot, logits, weights, gamma)\n elif loss_type == \"sigmoid\":\n cb_loss = F.binary_cross_entropy_with_logits(\n input=logits, target=labels_one_hot, pos_weight=weights)\n elif loss_type == \"softmax\":\n pred = logits.softmax(dim=1)\n cb_loss = F.binary_cross_entropy(\n input=pred, target=labels_one_hot, weight=weights)\n return cb_loss", "def train_logistic_regression(X_train_input, y_train_input, C=1):\r\n from sklearn.linear_model import LogisticRegression\r\n logr_clf = LogisticRegression(C=C)\r\n logr_clf.fit(X_train_input, y_train_input)\r\n return logr_clf", "def forward(self, *inputs):\n\n x = self.relu1(self.maxpool1(self.conv1(*inputs)))\n x = self.relu2(self.maxpool2(self.conv2_drop(self.conv2(x))))\n x = x.view(x.size(0), -1)\n x = self.relu3(self.fc1(x))\n x = self.dropout2(x)\n x = self.fc2(x)\n return self.log_softmax(x)", "def calculateCoefficientsTrainExp(np.ndarray[double, ndim=2, mode=\"c\"] x_logs not None, np.ndarray[double, ndim=2, mode=\"c\"] derivatives not None, np.ndarray[double, ndim=1] x_log_eigenvals not None, np.ndarray[double, ndim=2, mode=\"c\"] coefficients not None):\n cdef int n, dd, d\n\n n, dd = x_logs.shape[0], x_logs.shape[1]\n d = np.sqrt(dd)\n \n\n out = c_calculateCoefficientsTrainExp (&x_logs[0,0], &derivatives[0,0], &x_log_eigenvals[0], &coefficients[0,0], n, dd, d)\n\n return out", "def feature_mapped_logistic_regression(power, l):\n df = pd.read_csv('ex2data2.txt', names=['test1', 'test2', 'accepted'])\n x1 = np.array(df.test1)\n x2 = np.array(df.test2)\n y = general.get_y(df)\n\n X = feature_mapping(x1, x2, power, as_ndarray=True)\n theta = np.zeros(X.shape[1])\n\n res = opt.minimize(fun=regularized_cost,\n x0=theta,\n args=(X, y, l),\n method='TNC',\n jac=regularized_gradient)\n final_theta = res.x\n\n return final_theta", "def train(self):\n feature = Feature(trained=False)\n classifier = LogisticRegression(\n penalty='l2',\n max_iter=100,\n solver='liblinear',\n random_state=self.RAND_SEED)\n\n true_labels = []\n predicted_labels = []\n\n for subj in self.subjects:\n print(subj)\n # preprocess training and testing set\n self.dataset_gen(subject=subj, valid=False)\n\n # train and predict\n pipeline_steps = [('vectorized', feature.vector)]\n if self.istfidf:\n pipeline_steps.append(('tf-idf', feature.tfidftransform))\n if self.islda == 'small':\n pipeline_steps.append(('lda', feature.ldatransform_small))\n elif self.islda == 'large':\n pipeline_steps.append(('lda', feature.ldatransform_large))\n else:\n pass\n if self.isnorm:\n pipeline_steps.append(('scalar', StandardScaler(with_mean=False)))\n pipeline_steps.append(('clf', classifier))\n model = Pipeline(steps=pipeline_steps)\n\n model.fit(self.X_train, self.y_train)\n\n predicted = model.predict(self.X_test)\n # hamming\n predicted_labels.append(predicted)\n true_labels.append(self.y_test)\n\n 
true_matrix, pred_matrix = np.array(true_labels, int).T, np.array(predicted_labels, int).T\n true_matrix[true_matrix == -1] = 0\n pred_matrix[pred_matrix == -1] = 0\n\n evaluation = Evaluation(self.subjects)\n evaluation.model_evaluate(true_matrix=true_matrix, pred_matrix=pred_matrix, model_name=self.modelname)", "def forward(self, input):\n x = self.emb(input)\n x = F.avg_pool2d(x, kernel_size=x.shape[2:])\n x = x.view(x.shape[0:2])\n output = F.log_softmax(self.fc_final(x), dim=-1)\n return output", "def forward(self, input):\n if self.dataset_name == 'mnist':\n if self.temp_attn:\n y, attn_weight_list = self.tcanet(input)\n o = self.decoder(y[:, :, -1])\n return F.log_softmax(o, dim=1).contiguous()\n else:\n y = self.tcanet(input)\n o = self.decoder(y[:, :, -1])\n return F.log_softmax(o, dim=1).contiguous()\n emb = self.drop(self.word_encoder(input))\n if self.temp_attn:\n y, attn_weight_list = self.tcanet(emb.transpose(1, 2))\n y = self.decoder(y.transpose(1, 2))\n return y.contiguous(), [attn_weight_list[0], attn_weight_list[self.num_levels // 2], attn_weight_list[-1]]\n else:\n y = self.tcanet(emb.transpose(1, 2))\n y = self.decoder(y.transpose(1, 2))\n return y.contiguous()", "def forward(self, *inputs):\n\n x = functional.relu(functional.max_pool2d(self.conv1(*inputs), 2))\n x = functional.relu(functional.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = functional.relu(functional.max_pool2d(self.conv3(x), 2))\n x = x.view(x.size(0), -1)\n x = functional.relu(self.fc1(x))\n x = functional.dropout(x, training=self.training)\n x = self.fc2(x)\n return functional.log_softmax(x, dim=1)", "def logistic_pen(weights, data, targets, hyperparameters):\n\n # TODO: Finish this function\n\n return f, df, y", "def _bce_loss_with_logits(output, labels, **kwargs):\n return F.binary_cross_entropy_with_logits(output, labels, reduction='none', **kwargs)", "def forward(self, inp):\n out = self.features(inp)\n out = out.view(out.size(0), -1) # linearized the output of the module 'features'\n out = self.classifier(out)\n return out", "def get_logits(self, hidden_states: torch.FloatTensor,\n temperature: float = 1.0):\n return self.logits(hidden_states) / temperature", "def _get_branch_logits(features,\n num_classes,\n atrous_rates=None,\n aspp_with_batch_norm=False,\n kernel_size=1,\n weight_decay=0.0001,\n reuse=tf.AUTO_REUSE,\n scope_suffix=''):\n # When using batch normalization with ASPP, ASPP has been applied before\n # in _extract_features, and thus we simply apply 1x1 convolution here.\n if aspp_with_batch_norm or atrous_rates is None:\n if kernel_size != 1:\n raise ValueError('Kernel size must be 1 when atrous_rates is None or '\n 'using aspp_with_batch_norm. Gets %d.' 
% kernel_size)\n atrous_rates = [1]\n\n with arg_scope(\n [conv2d],\n weight_reg=regularizer('l2', weight_decay),\n weight_init=tf.truncated_normal_initializer(stddev=0.01)):\n with tf.variable_scope(_LOGITS_SCOPE_NAME, _LOGITS_SCOPE_NAME, [features], reuse=reuse):\n branch_logits = []\n for i, rate in enumerate(atrous_rates):\n scope = scope_suffix\n if i:\n scope += '_%d' % i\n\n branch_logits.append(\n conv2d(\n features,\n outc=num_classes,\n ksize=[kernel_size, kernel_size],\n ratios=[rate, rate],\n activate=None,\n batch_norm=False,\n use_bias=True,\n name=scope))\n\n return tf.add_n(branch_logits)", "def reshape_logits(output_feature: OutputFeature, logits: torch.Tensor) -> torch.Tensor:\n if isinstance(output_feature, CategoryOutputFeature) and output_feature.num_classes == 2:\n # add logits for the oposite class (LightGBM classifier only returns logits for one class)\n logits = logits.view(-1, 1)\n logits = torch.cat([-logits, logits], dim=1)\n\n return logits", "def log_features(data, columns):\n for col in columns:\n # deal with 0/1 values\n if np.sum(data[col] == 0) > 0:\n print('Replacing 0s with 0.025...')\n data.loc[data[col] == 0, col] = 0.025\n\n data[col] = np.log(data[col])", "def train_logistic_regression(train_x, train_y):\n\n logistic_regression_model = LogisticRegression(penalty='l2', C=1.0)\n logistic_regression_model.fit(train_x, train_y)\n return logistic_regression_model", "def forward(self, input_):\n feature = self.seq(input_)\n mu = self.fc1(feature)\n logvar = self.fc2(feature)\n std = torch.exp(0.5 * logvar)\n return mu, std, logvar", "def compute_logits(self,\n context_features=None,\n example_features=None,\n training=None,\n mask=None):\n\n if not example_features:\n raise ValueError('Need a valid example feature.')\n\n tensor = next(six.itervalues(example_features))\n batch_size = tf.shape(tensor)[0]\n list_size = tf.shape(tensor)[1]\n if mask is None:\n mask = tf.ones(shape=[batch_size, list_size], dtype=tf.bool)\n nd_indices, nd_mask = utils.padded_nd_indices(is_valid=mask)\n\n # Expand context features to be of [batch_size, list_size, ...].\n batch_context_features = {}\n for name, tensor in six.iteritems(context_features):\n x = tf.expand_dims(input=tensor, axis=1)\n x = tf.gather(x, tf.zeros([list_size], tf.int32), axis=1)\n batch_context_features[name] = utils.reshape_first_ndims(\n x, 2, [batch_size, list_size])\n\n batch_example_features = {}\n for name, tensor in six.iteritems(example_features):\n # Replace invalid example features with valid ones.\n padded_tensor = tf.gather_nd(tensor, nd_indices)\n batch_example_features[name] = utils.reshape_first_ndims(\n padded_tensor, 2, [batch_size, list_size])\n\n sparse_inputs, dense_inputs = [], []\n for name in batch_context_features:\n if name in self._sparse_embed_layers:\n sparse_inputs.append(self._sparse_embed_layers[name](batch_context_features[name]))\n else:\n dense_inputs.append(context_features[name])\n for name in batch_example_features:\n if name in self._sparse_embed_layers:\n sparse_inputs.append(self._sparse_embed_layers[name](batch_example_features[name]))\n else:\n dense_inputs.append(batch_example_features[name])\n sparse_inputs = [tf.squeeze(inpt, axis=2) for inpt in sparse_inputs]\n inputs = tf.concat(sparse_inputs + dense_inputs, axis=-1)\n\n scores = self.score(inputs,\n nd_mask,\n training=training)\n scores = tf.reshape(scores, shape=[batch_size, list_size, -1])\n\n # Apply nd_mask to zero out invalid entries.\n # Expand dimension and use broadcasting for filtering.\n 
expanded_nd_mask = tf.expand_dims(nd_mask, axis=2)\n scores = tf.where(expanded_nd_mask, scores, tf.zeros_like(scores))\n # Remove last dimension of shape = 1.\n try:\n logits = tf.squeeze(scores, axis=2)\n except:\n raise ValueError('Logits not of shape: [batch_size, list_size, 1]. '\n 'This could occur if the `scorer` does not return '\n 'a scalar output.')\n return logits", "def compute_forward_variables(self, normalized_logits, target):\n\n target_length = target.shape[0]\n num_time_steps = normalized_logits.shape[0]\n\n\t\t######################\n\t\t### YOUR CODE HERE ###\n\t\t######################\n \n blank_label = normalized_logits.shape[1] - 1\n l = add_blanks(target, blank_label)\n target_length = l.shape[0]\n\n # init\n alpha = np.zeros((target_length, num_time_steps))\n alpha[0, 0] = normalized_logits[0, blank_label] # where s = 0, t = 0\n alpha[1, 0] = normalized_logits[0, target[0]] # where s = 1, t = 0\n for i in xrange(2, num_time_steps): # for all s >= 2, t = 0\n alpha[i, 0] = 0\n\n # recursive case\n for t in xrange(1, num_time_steps):\n for s in xrange(2, target_length):\n \n a_bar = alpha[s, t-1] + alpha[s-1, t-1] \n\n if l[s] == blank_label or l[s-2] == l[s]:\n alpha[s, t] = normalized_logits[t, l[s]] * a_bar\n else:\n alpha[s, t] = normalized_logits[t, l[s]] * (a_bar + alpha[s-2, t-1])\n return alpha", "def forward(self, obs):\n res = self.hidden_layers(obs)\n logits = self.logits(res)\n value = self.value_branch(res)\n return logits, value", "def train_logistic_regression(train_exs: List[SentimentExample],\n feat_extractor: FeatureExtractor) -> LogisticRegressionClassifier:\n indexer = feat_extractor.get_indexer()\n weights = np.transpose(np.zeros(indexer.__len__(), dtype=int))\n learning_rate = 0.1\n for i in range(15):\n for ex in train_exs:\n features_of_str = feat_extractor.extract_features(ex.words, False)\n expo = math.exp(np.dot(weights, features_of_str))\n possibility = expo / (1 + expo)\n gradient_of_w = np.dot(ex.label - possibility, features_of_str)\n weights = np.add(weights, np.dot(learning_rate, gradient_of_w))\n return LogisticRegressionClassifier(weights, feat_extractor)\n\n # Methods for plotting average training loss\n\n # x = np.arange(0, 14)\n # # learning_rate = 1\n # indexer = feat_extractor.get_indexer()\n # weights = np.transpose(np.zeros(indexer.__len__(), dtype=int))\n # avrg_losses = np.zeros(14)\n # for i in range(15):\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # gradient_of_w = np.dot(ex.label - possibility, features_of_str)\n # weights = np.add(weights, gradient_of_w)\n # loss = 0\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # loss += -(ex.label * math.log(possibility) + (1 - ex.label) * math.log(1 - possibility))\n # avrg_losses[i - 1] = loss / train_exs.__len__()\n # plt.plot(x, avrg_losses)\n #\n # # learning_rate = 0.01\n # weights = np.transpose(np.zeros(indexer.__len__(), dtype=int))\n # learning_rate = 0.01\n # avrg_losses = np.zeros(14)\n # for i in range(15):\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # gradient_of_w = np.dot(ex.label - possibility, features_of_str)\n # weights = np.add(weights, 
np.dot(learning_rate, gradient_of_w))\n # loss = 0\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # loss += -(ex.label * math.log(possibility) + (1 - ex.label) * math.log(1 - possibility))\n # avrg_losses[i - 1] = loss / train_exs.__len__()\n # plt.plot(x, avrg_losses)\n #\n # # learning_rate = 0.1\n # weights = np.transpose(np.zeros(indexer.__len__(), dtype=int))\n # learning_rate = 0.1\n # avrg_losses = np.zeros(14)\n # for i in range(15):\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # gradient_of_w = np.dot(ex.label - possibility, features_of_str)\n # weights = np.add(weights, np.dot(learning_rate, gradient_of_w))\n # loss = 0\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # loss += -(ex.label * math.log(possibility) + (1 - ex.label) * math.log(1 - possibility))\n # avrg_losses[i - 1] = loss / train_exs.__len__()\n # plt.plot(x, avrg_losses)\n # plt.xlabel('Epochs')\n # plt.ylabel('Average Training Loss')\n # plt.legend(['step size 1', 'step size 0.01', 'step size 0.1'], loc='upper left')\n # plt.show()\n # return LogisticRegressionClassifier(weights, feat_extractor)", "def fit_logistic(data,labels,learning_rate=0.1,max_iteration=1000,target_error=0.1):\n dimension = len(data[0])\n #weight vector - np.array([w1,w2,w3,w4])\n weights = np.random.uniform(low=-0.01,high=0.01,size=(dimension+1))\n iteration = 0\n \n while iteration < max_iteration:\n iteration = iteration + 1\n \n predicted_prob = np.apply_along_axis(predict, 1,data,weights)\n errors = predicted_prob - labels\n \n current_error = np.sum(computeCostV(labels,predicted_prob)) / len(data)\n print(\"Iteration {0}, error:{1}\".format(iteration,current_error))\n #stop the algorithm if target error rate is reached\n if(current_error < target_error):\n break\n \n for j in range(len(weights)):\n sum_term = np.sum([errors[i]*data[i][j-1] if j!=0 else errors[i] for i in range(len(data))])\n weights[j] = weights[j] - learning_rate * sum_term\n \n return weights", "def multi_scale_logits(images,\n model_options,\n image_pyramid,\n outputs_to_num_classes,\n weight_decay=0.0001,\n is_training=False,\n fine_tune_batch_norm=False):\n # Setup default values.\n if not image_pyramid:\n image_pyramid = [1.0]\n\n if model_options.crop_size is None and model_options.add_image_level_feature:\n raise ValueError(\n 'Crop size must be specified for using image-level feature.')\n if model_options.model_variant == 'mobilenet_v2':\n if (model_options.atrous_rates is not None or\n model_options.decoder_output_stride is not None):\n # Output a warning and users should make sure if the setting is desired.\n tf.logging.warning('Our provided mobilenet_v2 checkpoint does not '\n 'include ASPP and decoder modules.')\n\n crop_height = (\n # 514\n model_options.crop_size[0]\n if model_options.crop_size else tf.shape(images)[1])\n crop_width = (\n model_options.crop_size[1]\n if model_options.crop_size else tf.shape(images)[2])\n\n # Compute the height, width for the output logits.\n # default to 16 , i.e. 
final predictions is [H/16, W/16]\n logits_output_stride = (\n model_options.decoder_output_stride or model_options.output_stride)\n\n logits_height = scale_dimension(\n crop_height,\n max(1.0, max(image_pyramid)) / logits_output_stride)\n logits_width = scale_dimension(\n crop_width,\n max(1.0, max(image_pyramid)) / logits_output_stride)\n\n # Compute the logits for each scale in the image pyramid.\n outputs_to_scales_to_logits = {\n k: {}\n for k in outputs_to_num_classes\n }\n\n for count, image_scale in enumerate(image_pyramid):\n # print('scale is {}'.format(image_scale))\n if image_scale != 1.0:\n scaled_height = scale_dimension(crop_height, image_scale)\n scaled_width = scale_dimension(crop_width, image_scale)\n scaled_crop_size = [scaled_height, scaled_width]\n scaled_images = tf.image.resize_bilinear(\n images, scaled_crop_size, align_corners=True)\n if model_options.crop_size:\n scaled_images.set_shape([None, scaled_height, scaled_width, 3])\n else:\n scaled_crop_size = model_options.crop_size\n scaled_images = images\n\n model_options.crop_size = scaled_crop_size\n outputs_to_logits = _get_logits(\n scaled_images,\n model_options,\n weight_decay=weight_decay,\n reuse=tf.AUTO_REUSE,\n is_training=is_training,\n outputs_to_num_classes=outputs_to_num_classes,\n fine_tune_batch_norm=fine_tune_batch_norm)\n\n # Resize the logits to have the same dimension before merging.\n for output in sorted(outputs_to_logits):\n # resize_bilinear requires channel to be one or three\n outputs_to_logits[output] = tf.image.resize_bilinear(\n outputs_to_logits[output], [logits_height, logits_width],\n align_corners=True)\n\n # Return when only one input scale.\n if len(image_pyramid) == 1:\n for output in sorted(outputs_to_num_classes):\n outputs_to_scales_to_logits[output][\n _MERGED_LOGITS_SCOPE] = outputs_to_logits[output]\n return outputs_to_scales_to_logits\n\n # Save logits to the output map.\n for output in sorted(outputs_to_num_classes):\n outputs_to_scales_to_logits[output][\n 'logits_%.2f' % image_scale] = outputs_to_logits[output]\n\n # Merge the logits from all the multi-scale inputs.\n for output in sorted(outputs_to_num_classes):\n # Concatenate the multi-scale logits for each output type.\n all_logits = [\n tf.expand_dims(logits, axis=4)\n for logits in outputs_to_scales_to_logits[output].values()\n ]\n all_logits = tf.concat(all_logits, 4)\n merge_fn = (\n tf.reduce_max\n if model_options.merge_method == 'max' else tf.reduce_mean)\n outputs_to_scales_to_logits[output][_MERGED_LOGITS_SCOPE] = merge_fn(\n all_logits, axis=4)\n\n return outputs_to_scales_to_logits", "def cnn_model_fn(features, labels, mode):\n # Input Layer\n # input layer should be of shape [:, NXCHANNELS, NVCHANNELS, 1]\n # NVCHANNELS: number of velocity bins\n \n NVCHANNELS=64\n NXCHANNELS=64\n \n input_layer = tf.reshape(features[\"x\"], [-1,NXCHANNELS, NVCHANNELS, 1])\n \n # Intermediate Layers are specified in different function \n if USE_TWO_LAYER:\n flat_layer = two_layers_cnn(input_layer) \n else:\n flat_layer = three_layers_cnn(input_layer)\n \n # Dense Layer\n # Densely connected layer with 1024 neurons\n # Input Tensor Shape: [batch_size, 4x4x32]\n # Output Tensor Shape: [batch_size, 32]\n dense = tf.layers.dense(inputs=flat_layer, units=32, activation=tf.nn.relu)\n \n # Add dropout operation; 0.7 probability that element will be kept\n dropout = tf.layers.dropout(\n inputs=dense, rate=0.3, training=mode == tf.estimator.ModeKeys.TRAIN)\n # Logits Layer\n \n # Logits layer\n # Input Tensor Shape: [batch_size, 
1024]\n # Output Tensor Shape: [batch_size, 1]\n if PREDICT_BOTH:\n n_outputs = 2\n else:\n n_outputs = 1\n logits = tf.layers.dense(inputs=dropout, units=n_outputs)\n \n \n if PREDICT_BOTH:\n beta_pred = logits[:,0]\n gamma_pred = logits[:,1]\n predictions = { \"beta_pred\": beta_pred, \"gamma_pred\": gamma_pred }\n else:\n beta_pred = logits[:,0]\n predictions = {\"beta_pred\": beta_pred}\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n \n # Calculate Loss (for both TRAIN and EVAL modes) \n # Simple Mean Squared\n if USE_MEAN_SQUARED:\n if PREDICT_BOTH: \n loss_gamma = tf.losses.mean_squared_error( labels['gamma'] , predictions['gamma_pred'] )\n else:\n loss_gamma = 0.0\n loss_beta = tf.losses.mean_squared_error( labels['beta'] , predictions['beta_pred'] )\n loss = loss_beta + loss_gamma\n else:\n # Compute Weighted Loss\n # Fractional difference from its true value\n ones = tf.ones( tf.shape( labels['beta'] ) , dtype=tf.float64 )\n if PREDICT_BOTH:\n inverse_gamma = tf.div( ones, labels['gamma'] )\n loss_gamma = tf.losses.mean_squared_error( labels['gamma'], predictions['gamma_pred'], weights= inverse_gamma )\n else:\n loss_gamma = 0.0\n inverse_beta = tf.div( ones, labels['beta'] )\n loss_beta = tf.losses.mean_squared_error( labels['beta'], predictions['beta_pred'], weights= inverse_beta )\n loss = loss_beta + loss_gamma \n\n # Configure the Training Op (for TRAIN mode)\n if mode == tf.estimator.ModeKeys.TRAIN:\n starter_learning_rate = 1.0e-3\n learning_rate = tf.train.exponential_decay(starter_learning_rate, \n tf.train.get_global_step(), 1000000, 0.96, staircase=True)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(\n loss=loss,\n global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n # Add evaluation metrics (for EVAL mode)\n if USE_MEAN_SQUARED:\n if PREDICT_BOTH: \n gamma_accuracy = tf.metrics.mean_squared_error( labels['gamma'] , predictions['gamma_pred'] )\n beta_accuracy = tf.metrics.mean_squared_error( labels['beta'] , predictions['beta_pred'] )\n else:\n # Compute Weighted Loss\n # Fractional difference from its true value\n ones = tf.ones( tf.shape( labels['beta'] ) , dtype=tf.float64 )\n if PREDICT_BOTH:\n inverse_gamma = tf.div( ones, labels['gamma'] )\n gamma_accuracy = tf.metrics.mean_squared_error( labels['gamma'], predictions['gamma_pred'], weights= inverse_gamma )\n inverse_beta = tf.div( ones, labels['beta'] )\n beta_accuracy = tf.metrics.mean_squared_error( labels['beta'], predictions['beta_pred'], weights= inverse_beta )\n\n\n if PREDICT_BOTH: \n eval_metric_ops = { \"beta_accuracy\": beta_accuracy, \"gamma_accuracy\": gamma_accuracy }\n else:\n eval_metric_ops = { \"beta_accuracy\": beta_accuracy } \n print(eval_metric_ops)\n\n return tf.estimator.EstimatorSpec( mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)", "def reg_logistic_regression(y, tx, l, initial_w, max_iters, gamma):\r\n y_resize = (1+y)/2 #rescales target so that -1 values are changed to 0 \r\n w_list = [initial_w]\r\n loss_list = []\r\n w = initial_w\r\n\r\n for n_iter in range(max_iters):\r\n grad = calculate_gradient_LR(y_resize, tx, w) + 2*l*w\r\n w = w - gamma*grad\r\n loss = compute_loss_LG(y_resize, tx, w)+ l*np.linalg.norm(w)\r\n w_list.append(w)\r\n loss_list.append(loss)\r\n if (n_iter > 1) and (np.abs(loss_list[-1] - loss_list[-2]) <= 1e-8):\r\n break\r\n return w_list[-1],loss_list[-1]", 
"def evaluate_GMM_log_likelihood(model, x, y):\n y_pred = model.predict(x)\n \n num_datapoints = len(x)\n output_dim = y.shape[-1]\n num_comp = int(y_pred.shape[-1] / (3*output_dim))\n\n mix_comp_logits = y_pred[:, :num_comp]\n mus = y_pred[:, num_comp:(1+output_dim)*num_comp]\n sigmas = y_pred[:, (1+output_dim)*num_comp:]\n \n # convert logits to categorical distribution - need to itterate through all points\n mix_comp = np.zeros((num_datapoints, num_comp))\n for i in range(num_datapoints):\n mix_comp[i,:] = get_mixture_dist(mix_comp_logits[i,:], num_comp)\n \n log_likelihood = 0\n for i in range(num_comp):\n for j in range(output_dim):\n mse = -0.5*np.sum(mix_comp[:,i]*np.square((y[:,j]-mus[:,(i*output_dim)+j])/sigmas[:,(i*output_dim)+j]))\n sigma_trace = -np.sum(mix_comp[:,i]*np.log(sigmas[:,(i*output_dim)+j]))\n log2pi = -np.sum(mix_comp[:,i]*0.5*output_dim*np.log(2*np.pi))\n\n log_likelihood += mse + sigma_trace + log2pi\n \n avg_log_likelihood = np.round(log_likelihood / num_datapoints, 2)\n print(f'Log likelihood: {avg_log_likelihood}')\n return avg_log_likelihood", "def _fit(self):\n\n\t\tclf = LogisticRegression()\n\t\tclf.fit(inputs, labels)\n\n\t\treturn clf", "def forward(self, x, logits=True):\n assert not torch.isnan(x).any(), f\"NaN in input {x}\"\n x = F.relu(self.l1(x))\n x = F.relu(self.l2(x))\n x = self.l3(x)\n if not logits:\n x = torch.softmax(x, dim=1)\n return x", "def on_train_begin(self, logs={}):\n self.losses = []\n self.accuracies = []", "def multi_class5_classification_model_logits() -> tf.keras.Model:\n\n # Build model\n model = tf.keras.Sequential(tf.keras.layers.Dense(5, activation=None))\n model.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True))\n\n return model", "def forward(self, X, X_lens):\n\n\t\t# get the batch size and sequence length (max length of the batch)\n\t\t# dim of X: batch_size x batch_max_len x input feature vec dim\n\t\tbatch_size, seq_len, _ = X.size()\n\n\t\t###Your code here --\n\t\t# Get the output of LSTM - (output dim: batch_size x batch_max_len x lstm_hidden_dim)\n\t\thid, _ = self.lstm(X)\n\t\t# print('hid \\n{}'.format(hid))\n\n\t\t# reshape (before passing to linear layer) so that each row contains one token\n\t\t# essentially, flatten the output of LSTM\n\t\t# dim will become batch_size*batch_max_len x lstm_hidden_dim\n\t\thid_flat = hid.contiguous().view(hid.shape[0] * hid.shape[1], hid.shape[2])\n\t\t# print('hid_flat \\n{}'.format(hid_flat))\n\n\t\t# Get logits from the final linear layer\n\t\tlogits = self.hidden_to_label(hid_flat)\n\t\t# print('logits \\n {}'.format(logits))\n\t\t# print('logits shape {}, hid shape {}, hid_flat shape {}'.format(logits.shape, hid.shape, hid_flat.shape))\n\t\t# --shape of logits -> (batch_size, seq_len, self.n_output)\n\t\treturn logits", "def log_prior_grad(self, inputs):", "def softmax_crossentropy_with_logits(logits,reference_answers):\r\n logits_for_answers = logits[np.arange(len(logits)),reference_answers]\r\n \r\n xentropy = - logits_for_answers + np.log(np.sum(np.exp(logits),axis=-1))\r\n \r\n return xentropy" ]
[ "0.7213141", "0.6972253", "0.6765194", "0.6761858", "0.67392176", "0.6513034", "0.64970493", "0.64832693", "0.6462769", "0.64080673", "0.6387591", "0.6311425", "0.6272004", "0.62515974", "0.62422395", "0.62104046", "0.62095785", "0.62022495", "0.61950845", "0.6186781", "0.61813974", "0.61476344", "0.61155623", "0.6109147", "0.6101893", "0.60928804", "0.6054881", "0.60447484", "0.60229427", "0.60227734", "0.5984276", "0.5975609", "0.5961815", "0.5925396", "0.5924825", "0.5910992", "0.5908968", "0.5908021", "0.59063464", "0.58996826", "0.5883751", "0.5882642", "0.5880577", "0.58775806", "0.58545715", "0.5842853", "0.58423805", "0.58384585", "0.58379763", "0.5835428", "0.58239484", "0.5820465", "0.5809461", "0.58052707", "0.58043796", "0.5804017", "0.5802773", "0.5796874", "0.5793117", "0.5793055", "0.578975", "0.5781565", "0.5757945", "0.5757603", "0.57544094", "0.5753718", "0.5752546", "0.57419586", "0.5734547", "0.57334346", "0.5727864", "0.5724523", "0.5724491", "0.57228535", "0.5717453", "0.5716471", "0.57158047", "0.5711349", "0.5710951", "0.5709997", "0.57082963", "0.5708024", "0.57073444", "0.5707299", "0.57070106", "0.5698763", "0.5698456", "0.5697062", "0.5689352", "0.5680909", "0.56775963", "0.5674341", "0.56696355", "0.5664942", "0.56619054", "0.5659981", "0.56589955", "0.5658928", "0.56502384", "0.5650133" ]
0.71031106
1
Compute the NLL loss given features h and targets y. This assumes that the features have already been computed with the model
def nll_on_features(self, h, batch, reduction="mean"): batch = batch.to(h.device) y = batch.outputs # Extract features with the model features = h.view(batch.size, -1) # Log loss logits = self.head(features) log_probs = F.log_softmax(logits, dim=-1) nll_loss = F.nll_loss(log_probs, y, reduction=reduction) return nll_loss
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n # We are gonna store everythin in a dictionnary hidden\n hidden = {}\n hidden['h0'] = X.reshape(X.shape[0], np.prod(X.shape[1:]))\n\n for i in range(self.L):\n idx = i + 1\n # Naming of the variable\n w = self.params['W' + str(idx)]\n b = self.params['b' + str(idx)]\n h = hidden['h' + str(idx - 1)]\n\n # Computing of the forward pass.\n # Special case of the last layer (output)\n if idx == self.L:\n h, cache_h = affine_forward(h, w, b)\n hidden['h' + str(idx)] = h\n hidden['cache_h' + str(idx)] = cache_h\n\n # For all other layers\n else:\n h, cache_h = affine_relu_forward(h, w, b)\n hidden['h' + str(idx)] = h\n hidden['cache_h' + str(idx)] = cache_h\n\n scores = hidden['h' + str(self.L)]\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n # Computing of the loss\n data_loss, dscores = softmax_loss(scores, y)\n reg_loss = 0\n for w in [self.params[f] for f in self.params.keys() if f[0] == 'W']:\n reg_loss += 0.5 * self.reg * np.sum(w * w)\n\n loss = data_loss + reg_loss\n\n # Backward pass\n\n hidden['dh' + str(self.L)] = dscores\n for i in range(self.L)[::-1]:\n idx = i + 1\n dh = hidden['dh' + str(idx)]\n h_cache = hidden['cache_h' + str(idx)]\n if idx == self.L:\n dh, dw, db = affine_backward(dh, h_cache)\n hidden['dh' + str(idx - 1)] = dh\n hidden['dW' + str(idx)] = dw\n hidden['db' + str(idx)] = db\n else:\n dh, dw, db = affine_relu_backward(dh, h_cache)\n hidden['dh' + str(idx - 1)] = dh\n hidden['dW' + str(idx)] = dw\n hidden['db' + str(idx)] = db\n\n # w gradients where we add the regulariation term\n list_dw = {key[1:]: val + self.reg * self.params[key[1:]]\n for key, val in hidden.iteritems() if key[:2] == 'dW'}\n # Paramerters b\n list_db = {key[1:]: val for key, val in hidden.iteritems() if key[:2] == 'db'}\n # Parameters gamma\n list_dgamma = {key[1:]: val for key, val in hidden.iteritems() if key[:6] == 'dgamma'}\n # Paramters beta\n list_dbeta = {key[1:]: val for key, val in hidden.iteritems() if key[:5] == 'dbeta'}\n grads = {}\n grads.update(list_dw)\n grads.update(list_db)\n grads.update(list_dgamma)\n grads.update(list_dbeta)\n return loss, grads", "def lossFun(self, inputs, targets, hprev):\n xs, hs, ys, ps = {}, {}, {}, {}\n hs[-1] = np.copy(hprev)\n loss = 0\n # forward pass\n for t in range(len(inputs)):\n xs[t] = np.zeros((self._txt_reader.vocab_size,1)) # One-hot, encode in 1-of-k representation\n xs[t][inputs[t]] = 1\n hs[t] = np.tanh(np.dot(self._Wxh, xs[t]) + np.dot(self._Whh, hs[t-1]) + self._bh) # compute chidden state\n ys[t] = np.dot(self._Why, hs[t]) + self._by # logits \n ys[t] -= ys[t].max() # for numerical stability\n ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # probabilities for next chars\n\n loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss)\n\n # backward pass: compute gradients going backwards\n dWxh, dWhh, dWhy = np.zeros_like(self._Wxh), np.zeros_like(self._Whh), np.zeros_like(self._Why)\n dbh, dby = np.zeros_like(self._bh), np.zeros_like(self._by)\n\n dhnext = np.zeros_like(hs[0])\n for t in reversed(range(len(inputs))):\n dy = np.copy(ps[t])\n dy[targets[t]] -= 1 # backprop into y. 
see http://cs231n.github.io/neural-networks-case-study/#grad if confused here\n dWhy += np.dot(dy, hs[t].T)\n dby += dy\n dh = np.dot(self._Why.T, dy) + dhnext # backprop into h\n dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity\n dbh += dhraw\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t-1].T)\n dhnext = np.dot(self._Whh.T, dhraw)\n\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients\n return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]", "def lossFunc(inputs, targets, hprev):\n xs, hs, ys, ps = {}, {}, {}, {} # input, hidden, output, out_prob states for each time t\n hs[-1] = np.copy(hprev)\n loss = 0\n \n # forward pass\n for t in xrange(len(inputs)):\n xs[t] = np.zeros((vocab_size,1)) \n xs[t][inputs[t]] = 1. # convert input to one-hot\n hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh)\n ys[t] = np.dot(Why, hs[t]) + by\n ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t]))\n loss += -np.log(ps[t][targets[t],0])\n \n # backward pass\n dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(hs[0])\n for t in reversed(xrange(len(inputs))):\n # backprop into y\n dy = np.copy(ps[t])\n dy[targets[t]] -= 1\n # backprop into Why, hs, and by\n dWhy += np.dot(dy, hs[t].T)\n dby += dy\n dh = np.dot(Why.T, dy) + dhnext\n # backprop through tanh activition\n dhraw = (1 - hs[t] * hs[t]) * dh\n # backprop into Wxh, Whh, hs, and bh\n dbh += dhraw\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t-1].T)\n dhnext = np.dot(Whh.T, dhraw)\n # clip gradient preventing exploding\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam)\n\n return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]", "def lossFun(review, target, hprev):\n xs, hs, ys, ps = {}, {}, {}, {}\n hs[-1] = np.copy(hprev)\n loss = 0\n\n # forward pass\n for t in range(len(review)):\n xs[t] = np.zeros((vector_len,1)) # encode in 1-of-k representation\n for j in range(32):\n xs[t][j] = review[t][j]\n hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh) # hidden state\n\n #Many 2 one\n last = len(review) - 1\n ys = np.dot(Why, hs[last]) + by # unnormalized log probabilities for next chars\n ps = np.exp(ys) / np.sum(np.exp(ys)) # probabilities for next chars\n loss = -np.log(ps[target,0]) # softmax (cross-entropy loss)\n\n # backward pass: compute gradients going backwards\n dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(hs[0])\n\n dy = np.subtract(ps,target) # backprop into y. 
see http://cs231n.github.io/neural-networks-case-study/#grad if confused here\n dWhy += np.dot(dy, hs[last].T)\n dby += dy\n dh = np.dot(Why.T, dy) + dhnext # backprop into h\n for t in reversed(range(len(review))):\n dhraw = (1 - (hs[t] * hs[t].T)) * dh # backprop through tanh nonlinearity\n dbh += dhraw\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t-1].T)\n dhnext = np.dot(Whh.T, dhraw)\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients\n return loss, dWxh, dWhh, dWhy, dbh, dby, hs[last]", "def loss_fn(self, targets, outputs, model):", "def _compute_loss(self, predictions, targets, **params):\n pass", "def L2(yhat, y):\n loss = np.dot((y - yhat).T,(y - yhat))\n \n return loss", "def calculate_loss(self, y, y_hat):\r\n return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=y_hat))", "def loss(self, y: np.ndarray, y_hat: np.ndarray) -> float:\n losses = -(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat))\n return losses.mean() + self.reg / self.num_parameters * (\n (self.v[:, -1] ** 2).sum() + (self.w ** 2).sum()\n )", "def lossFun(inputs, targets, hprev):\n xs, hs, ys, ps = {}, {}, {}, {}\n hs[-1] = np.copy(hprev)\n loss = 0\n\n # forward pass\n for t in xrange(len(inputs)):\n xs[t] = np.zeros((vocab_size,1)) # encode in 1-of-k representation\n xs[t][inputs[t]] = 1\n hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh) # hidden state\n ys[t] = np.dot(Why, hs[t]) + by # unnormalized log probabilities for next chars\n ps[t] = np.exp(ys[t]-np.max(ys[t])) / np.sum(np.exp(ys[t]-np.max(ys[t]))) # probabilities for next chars\n loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss)\n\n assert_array_equal(van.window_step,t)\n assert_array_equal(van.state[t-1],hs[t-1].T[0])\n assert_array_equal(van.statenet[t].forward([xs[t].T[0],hs[t-1].T[0]]),hs[t].T[0])\n assert_array_equal(van.statenet[t].net.elements[0].elements[0].elements[1].W.get(),Wxh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[1].elements[1].W.get(),Whh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[2].W.get(),bh.T[0])\n\n assert_array_equal(vantr.statenet[t].net.elements[0].elements[0].elements[1].W.get(),Wxh)\n assert_array_equal(vantr.statenet[t].net.elements[0].elements[1].elements[1].W.get(),Whh)\n assert_array_equal(vantr.statenet[t].net.elements[0].elements[2].W.get(),bh.T[0])\n assert_array_equal(vantr.outputnet[t].net.elements[0].elements[1].W.get(),Why)\n assert_array_equal(vantr.outputnet[t].net.elements[1].W.get(),by.T[0])\n\n #\n # #Neg\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[0].elements[1].W,Why)\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[1].W,by.T[0])\n # assert_array_almost_equal(van.outputnet[t].forward(hs[t].T[0]),ps[t].T[0])\n # assert_array_almost_equal(van.outputnet[t].forward(van.statenet[t].forward([xs[t].T[0],hs[t-1].T[0]])),ps[t].T[0])\n # assert_array_almost_equal(van.forward(xs[t].T[0]),ps[t].T[0])\n #\n # Cross\n assert_array_equal(van.outputnet[t].net.elements[0].elements[1].W.get(),Why)\n assert_array_equal(van.outputnet[t].net.elements[1].W.get(),by.T[0])\n assert_array_equal(van.outputnet[t].forward(hs[t].T[0]),ys[t].T[0])\n assert_array_equal(van.outputnet[t].forward(van.statenet[t].forward([xs[t].T[0],hs[t-1].T[0]])),ys[t].T[0])\n assert_array_equal(van.outputnet[t].forward(van.statenet[t].forward([xs[t].T[0],van.state[t-1]])),ys[t].T[0])\n 
assert_array_equal(van.forward(xs[t].T[0]),ys[t].T[0])\n assert_array_equal(soft.forward(ys[t].T[0]),ps[t].T[0])\n\n # backward pass: compute gradients going backwards\n dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(hs[0])\n\n for t in reversed(xrange(len(inputs))):\n dy = np.copy(ps[t])\n dy[targets[t]] -= 1 # backprop into y. see http://cs231n.github.io/neural-networks-case-study/#grad if confused here\n dWhy += np.dot(dy, hs[t].T)\n dby += dy\n dh = np.dot(Why.T, dy) + dhnext # backprop into h\n dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity\n dbh += dhraw\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t-1].T)\n\n #\n # #Neg\n # van.backward(negLog.dJdy_gradient(ps[t].T[0],to_one_hot_vect(targets[t],vocab_size)),opt)\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[1].x,hs[t].T[0])\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[0].elements[1].dW,dWhy)\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[1].dW,dby.T[0])\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[0].elements[1].W,Why)\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[1].W,by.T[0])\n #\n #Cross\n assert_array_equal(van.outputnet[t].net.elements[0].elements[1].x,hs[t].T[0])\n assert_array_equal(van.outputnet[t].net.forward(hs[t].T[0]),ys[t].T[0])\n assert_array_equal(soft.forward(van.outputnet[t].net.forward(hs[t].T[0])),ps[t].T[0])\n assert_array_equal(soft.forward(van.outputnet[t].net.forward(hs[t].T[0]))-to_one_hot_vect(targets[t],vocab_size),dy.T[0])\n\n err = cross.dJdy_gradient(ys[t].T[0],to_one_hot_vect(targets[t],vocab_size))\n\n assert_array_equal(soft.forward(van.outputnet[t].net.forward(hs[t].T[0]))-to_one_hot_vect(targets[t],vocab_size),dy.T[0])\n assert_array_equal(ps[t].T[0]-to_one_hot_vect(targets[t],vocab_size),dy.T[0])\n assert_array_equal(err,dy.T[0])\n\n van.backward(err,opt)\n\n assert_array_equal(van.outputnet[t].net.elements[0].elements[1].W.get_dW(),dWhy)\n assert_array_equal(van.outputnet[t].net.elements[0].elements[1].W.get(),Why)\n assert_array_equal(van.outputnet[t].net.elements[1].W.get_dW(),dby.T[0])\n assert_array_almost_equal(van.outputnet[t].net.elements[1].W.get(),by.T[0])\n #\n\n assert_array_equal(van.statenet[t].net.elements[0].elements[0].elements[1].W.get_dW(),dWxh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[1].elements[1].W.get_dW(),dWhh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[2].W.get_dW(),dbh.T[0])\n assert_array_equal(van.statenet[t].net.elements[0].elements[0].elements[1].W.get(),Wxh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[1].elements[1].W.get(),Whh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[2].W.get(),bh.T[0])\n assert_array_equal(van.dJdh[t],dhnext.T[0])\n\n dhnext = np.dot(Whh.T, dhraw)\n\n opt.update_model()\n trainer.learn_window(vantr,zip(to_hot_vect(inputs,vocab_size),to_hot_vect(targets,vocab_size)),crosstr,opttr)\n\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients\n\n return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]", "def loss(output, y):\n #Computes softmax cross entropy between logits and labels.\n xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y)\n loss = tf.reduce_mean(xentropy)\n\n return loss", "def loss_fn(self, lbl, 
y):\n\n binlbl = self._to_device(lbl[:,0]>.5)\n # center = self._to_device(lbl[:,3]) \n offset = 5. * self._to_device(lbl[:,1:]) \n\n loss = self.criterion(y[:,:2], offset) \n loss2 = self.criterion2(y[:,2], binlbl)\n\n # loss3 = self.criterion(y[:,3], center)\n\n loss = loss + loss2\n return loss", "def L1(yhat, y):\n\n loss = np.sum(np.abs(y - yhat))\n \n return loss", "def _discriminator_loss(self, y, y_hat):\n\n l1 = tf.nn.sigmoid_cross_entropy_with_logits(labels = tf.ones(tf.shape(y)),logits = y)\n l2 = tf.nn.sigmoid_cross_entropy_with_logits(labels = tf.zeros(tf.shape(y_hat)),logits = y_hat)\n l = tf.reduce_mean(l1+l2)\n print('_discriminator_loss shape,', tf.shape(l))\n return l", "def loss(self, X, y=None):\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the two-layer net, computing the #\n # class scores for X and storing them in the scores variable. #\n ############################################################################\n hid1, hid1cache = affine_relu_forward(X, self.params['W1'], self.params['b1'])\n scores, scorecache = affine_forward(hid1, self.params['W2'], self.params['b2'])\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n # If y is None then we are in test mode so just return scores\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the two-layer net. Store the loss #\n # in the loss variable and gradients in the grads dictionary. Compute data #\n # loss using softmax, and make sure that grads[k] holds the gradients for #\n # self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. 
#\n ############################################################################\n loss, dscores = softmax_loss(scores, y)\n loss += 0.5 * self.reg *( np.sum(self.params['W1']**2) + np.sum(self.params['W2']**2) )\n\n dhid1, grads['W2'], grads['b2'] = affine_backward(dscores, scorecache)\n dx, grads['W1'], grads['b1'] = affine_relu_backward(dhid1, hid1cache)\n\n grads['W1'] += self.reg * self.params['W1']\n grads['W2'] += self.reg * self.params['W2']\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def compute_loss(self, X, y):\r\n # INSERT YOUR CODE HERE\r\n #raise Exception('Function not yet implemented!')\r\n\r\n # Computing the loss using the below formula\r\n # Loss = -(1/m)*sum( (y_i)*log(σ(wTx_i)) + (1-y_i)*log(1 - σ(wTx_i)))\r\n # m = number of examples and i for ith example\r\n\r\n loss = 0\r\n X = np.append(X, np.array([[1]]*X.shape[0]), axis=1)\r\n # for idx,example in enumerate(X):\r\n # loss = loss + y[idx] * np.log(self.sigmoid(np.dot(example, self.w))) + (1 - y[idx]) * np.log(1 - self.sigmoid(np.dot(example, self.w)))\r\n # loss = -loss/ X.shape[0]\r\n\r\n loss = -np.mean(y * np.log(self.sigmoid(np.dot(X, self.w))) + (1 - y) * np.log(1 - self.sigmoid(np.dot(X, self.w))))\r\n return loss", "def __loss(self, h, y):\n return (-y*np.log(h)-(1-y)*np.log(1-h)).mean()", "def deep_feature_loss(self, y0, y1):\n assert (self.sess is not None) and (not self.sess._closed)\n if not self.vars_loaded:\n print((\"WARNING: `deep_feature_loss` called before loading vars\"))\n feed_dict={self.tensor_wave0: y0, self.tensor_wave1: y1}\n return self.sess.run(self.loss_deep_features, feed_dict=feed_dict)", "def loss_fn(outputs, labels, wts):\n\n # reshape labels to give a flat vector of length batch_size*seq_len\n loss_noreduce = nn.BCEWithLogitsLoss(reduce=False)\n loss = torch.mean(loss_noreduce(outputs, labels)*wts)\n\t\n # compute cross entropy loss for all tokens\n return loss", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE question 4 ***\"\n return nn.SoftmaxLoss(self.run(xs), y)", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(xs)\n return nn.SoftmaxLoss(predictedY, y)\n # return nn.SquareLoss(predictedY, y)", "def setup_loss(self, h_s, h_e):\n with vs.variable_scope(\"loss\"):\n # masked_h_s = tf.boolean_mask(h_s, self.context_mask_placeholder)\n # masked_h_e = tf.boolean_mask(h_e, self.context_mask_placeholder)\n # loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(h_s, self.start_span_placeholder) +\n # tf.nn.softmax_cross_entropy_with_logits(h_e, self.end_span_placeholder))\n masked_h_s = tf.add(h_s, (1 - tf.cast(self.context_mask_placeholder, 'float')) * (-1e30))\n masked_h_e = tf.add(h_e, (1 - tf.cast(self.context_mask_placeholder, 'float')) * (-1e30))\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(masked_h_s, self.start_span_placeholder) +\n tf.nn.softmax_cross_entropy_with_logits(masked_h_e, self.end_span_placeholder))\n total_loss = loss\n return total_loss, masked_h_s, masked_h_e", "def log_loss_objective(y_true: npt.NDArray, y_pred: npt.NDArray) -> Tuple[npt.NDArray, npt.NDArray]:\n y_pred = sigmoid(y_pred)\n grad = y_pred - y_true\n hess = y_pred * (1.0 - y_pred)\n return grad, hess", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n scores = None\n 
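        # forward pass below: stack affine + sigmoid hidden layers (caching each layer's inputs), then a final plain affine layer produces the class scores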
############################################################################\n # Implementing the forward pass for the fully-connected net, computing #\n # the class scores for X and storing them in the scores variable. #\n ############################################################################\n\n l_input = X.copy()\n out = []\n cache = []\n for i in range(self.num_layers - 1):\n # layerwise compute the forward pass and store outputs in out list\n key = ['W' + str(i+1), 'b' + str(i+1)]\n lout, lcache = affine_sigmoid_forward(l_input, self.params[key[0]], self.params[key[1]])\n out.append(lout)\n cache.append(lcache)\n l_input = lout\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n scores, lcache = affine_forward(out[self.num_layers - 2], self.params[key[0]], self.params[key[1]])\n cache.append(lcache)\n \n # regularization parameter compute by summing square of all weight vectors\n R = 0\n for i in range(1, self.num_layers + 1):\n key = 'W' + str(i)\n R += np.sum(np.power(self.params[key], 2))\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n loss, grads = 0.0, {}\n\n ########################\n # Backward pass to compute the loss and gradients\n ########################\n\n loss, dscore = softmax_loss(scores, y)\n # Apply regularization of the loss \n loss = loss + 0.5 * self.reg * R\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n dx, grads[key[0]], grads[key[1]] = affine_backward(dscore, cache[self.num_layers - 1])\n grads[key[0]] += self.reg * self.params[key[0]] \n\n for i in range(self.num_layers - 1, 0, -1):\n key = ['W' + str(i), 'b' + str(i)]\n dx, grads[key[0]], grads[key[1]] = affine_sigmoid_backward(dx, cache[i-1])\n # Apply regularization to the gradients\n grads[key[0]] += self.reg * self.params[key[0]]\n\n return loss, grads", "def train(self, X, y):\n h1_input, h1_output, h2_input, h2_output, final_output = self.forwardpass_train(\n X\n )\n # calculate average loss per one data\n train_loss = self.cross_entropy_loss(y, final_output)\n dW1, db1, dW2, db2, dW3, db3 = self.backpropagation(\n X, y, h1_input, h1_output, h2_input, h2_output, final_output\n )\n self.update_weights(dW1, db1, dW2, db2, dW3, db3)\n return train_loss", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n y_pred = self.run(xs)\n return nn.SoftmaxLoss(y_pred,y)", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n # conv - relu - 2x2 max pool - affine - relu - affine - softmax\n\n\n # pass conv_param to the forward pass for the convolutional layer\n # Padding and stride chosen to preserve the input spatial size\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) // 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n h1, c1 = conv_forward_im2col(X, W1, b1, conv_param) #\n h1, r1 = relu_forward(h1)\n h1, p1 = max_pool_forward_fast(h1, pool_param) #\n max_pool_shape = h1.shape\n h1 = h1.reshape(X.shape[0], -1)\n h2, c2 = affine_relu_forward(h1, W2, b2)\n scores, c3 = affine_forward(h2, W3, b3)\n\n if y is None:\n return scores\n\n loss, dx = softmax_loss(scores, y)\n\n loss += self.reg / 2 * (self.params['W1']**2).sum()\n loss += self.reg / 2 * (self.params['W2']**2).sum()\n loss += self.reg / 2 * (self.params['W3']**2).sum()\n\n 
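        # with the 0.5 * reg * sum(W**2) penalty added above, each weight gradient below only needs an extra reg * W term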
############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. #\n ############################################################################\n \n grads = {}\n dx, grads['W3'], grads['b3'] = affine_backward(dx, c3)\n grads['W3'] += self.reg * self.params['W3']\n dx, grads['W2'], grads['b2'] = affine_relu_backward(dx, c2)\n dx = dx.reshape(max_pool_shape)\n dx = max_pool_backward_fast(dx, p1)\n dx = relu_backward(dx, r1)\n dx, grads['W1'], grads['b1'] = conv_backward_im2col(dx, c1)\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def HuberLoss(x, y, theta, epsilon):\n try:\n x = np.asmatrix(x)\n y = np.asmatrix(y).reshape(-1, 1)\n theta = np.asmatrix(theta).reshape(-1, 1)\n if x.shape[0] != y.shape[0]:\n x = x.transpose()\n except Exception:\n print('There is an error with the input data,\\\n please make sure your x can be transformed into n by m matrix,\\\n your y can be transformed into n by 1 vector,\\\n your theta can be transformed into m by 1 vector')\n sys.exit(0)\n\n n = x.shape[0] # sample size\n fx = x @ theta # matrix (dot) production for estimated y\n error = np.abs(y - fx)\n\n def hl(element):\n if element <= epsilon:\n loss = 1/2 * element**2\n else:\n loss = epsilon * element - 1/2 * epsilon**2\n\n return(loss)\n\n hlvector = np.vectorize(hl)\n\n loss = 1/n * np.sum(hlvector(error))\n\n return(loss)", "def loss(y, y_pred):\n # assert_is_binary(y)\n # assert_is_stochastic(y_pred)\n is_binary(y)\n is_stochastic(y_pred)\n\n # prevent taking the log of 0\n eps = np.finfo(float).eps\n\n # each example is associated with a single class; sum the negative log\n # probability of the correct label over all samples in the batch.\n # observe that we are taking advantage of the fact that y is one-hot\n # encoded!\n cross_entropy = -np.sum(y * np.log(y_pred + eps))\n return cross_entropy", "def get_loss(self, xs, y):\n return nn.SoftmaxLoss(self.run(xs), y)", "def nll(y_true, y_pred):\n return K.sum(K.binary_crossentropy(y_true, y_pred), axis=-1)", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n # Set train/test mode for batchnorm params and dropout param since they\n # behave differently during training and testing.\n if self.dropout_param is not None:\n self.dropout_param['mode'] = mode \n if self.use_batchnorm:\n for bn_param in self.bn_params:\n bn_param[mode] = mode\n\n scores = None\n \n ### forward pass ###\n L = self.num_layers\n past_caches = [0 for i in range(L)]\n \n if self.use_dropout:\n dropout_caches = [0 for i in range(L)]\n \n out = X\n if self.use_batchnorm:\n for i in range(L-1):\n\n out, past_caches[i] = affine_batch_relu_forward(out, self.params['W' + str(i+1)],\n self.params['b' + str(i+1)], \n self.params['gamma' + str(i+1)],\n self.params['beta' + str(i+1)],\n self.bn_params[i])\n if self.use_dropout:\n out, dropout_caches[i] = 
dropout_forward(out, self.dropout_param)\n else:\n for i in range(L-1):\n\n out, past_caches[i] = affine_relu_forward(out, self.params['W' + str(i+1)],\n self.params['b' + str(i+1)])\n if self.use_dropout:\n out, dropout_caches[i] = dropout_forward(out, self.dropout_param)\n \n scores, past_caches[L-1] = affine_forward(out, self.params['W' + str(L)],\n self.params['b' + str(L)])\n \n ### backpropagation ###\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n loss, grads = 0.0, {}\n \n loss_l2 = 0\n \n loss, dx = softmax_loss(scores, y)\n for i in range(L-1): \n W = 'W' + str(i+1)\n loss_l2 += np.sum(self.params[W]*self.params[W])\n loss_l2 *= 0.5 * self.reg\n loss += loss_l2\n \n W_final = 'W'+str(L)\n b_final = 'b'+str(L)\n dx, grads[W_final], grads[b_final] = affine_backward(dx, past_caches[L-1])\n grads[W_final] += self.reg * self.params[W_final]\n \n if self.use_batchnorm:\n for i in range(L-1):\n ind = L-1-i\n W = 'W'+str(ind)\n b = 'b'+str(ind)\n gamma = 'gamma' + str(ind)\n beta = 'beta' + str(ind)\n \n if self.use_dropout:\n dx = dropout_backward(dx, dropout_caches[-i-2])\n\n dx, grads[W], grads[b], grads[gamma], grads[beta] = affine_batch_relu_backward(dx, past_caches[-i-2])\n grads[W] += self.reg * self.params[W]\n\n else:\n for i in range(L-1):\n ind = L-1-i\n W = 'W'+str(ind)\n b = 'b'+str(ind)\n \n if self.use_dropout:\n dx = dropout_backward(dx, dropout_caches[-i-2])\n\n dx, grads[W], grads[b] = affine_relu_backward(dx, past_caches[-i-2])\n grads[W] += self.reg * self.params[W]\n\n return loss, grads", "def loss(self, X, labels):\n features = self.get_conv_feats(X)\n loss = blah\n return loss", "def nn_cost_function(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, l):\n Theta_1 = np.reshape(nn_params[0:(hidden_layer_size * (input_layer_size + 1)), ],\n (hidden_layer_size, input_layer_size + 1))\n Theta_2 = np.reshape(nn_params[(hidden_layer_size * (input_layer_size + 1)):, ],\n (num_labels, hidden_layer_size + 1))\n\n m, n = X.shape\n X = np.hstack((np.ones((m, 1)), X))\n\n Z_2 = X.dot(Theta_1.T)\n A_2 = sigmoid(Z_2)\n A_2 = np.hstack((np.ones((m, 1)), A_2))\n\n Z_3 = A_2.dot(Theta_2.T)\n A_3 = sigmoid(Z_3)\n\n Y = np.zeros((m, num_labels))\n for i in range(m):\n Y[i, y[i] - 1] = 1\n\n j = 0.0\n for i in range(m):\n j += np.log(A_3[i, ]).dot(-Y[i, ].T) - np.log(1 - A_3[i, ]).dot(1 - Y[i, ].T)\n j /= m\n\n Theta_1_square = np.square(Theta_1[:, 1:])\n Theta_2_square = np.square(Theta_2[:, 1:])\n reg = 1.0 * l / (2 * m) * (np.sum(Theta_1_square) + np.sum(Theta_2_square))\n j += reg\n\n d_3 = A_3 - Y\n D_2 = d_3.T.dot(A_2)\n\n Z_2 = np.hstack((np.ones((m, 1)), Z_2))\n d_2 = d_3.dot(Theta_2) * sigmoid_gradient(Z_2)\n d_2 = d_2[:, 1:]\n D_1 = d_2.T.dot(X)\n\n Theta_1_grad = 1.0 * D_1 / m\n Theta_1_grad[:, 1:] = Theta_1_grad[:, 1:] + 1.0 * l / m * Theta_1[:, 1:]\n\n Theta_2_grad = 1.0 * D_2 / m\n Theta_2_grad[:, 1:] = Theta_2_grad[:, 1:] + 1.0 * l / m * Theta_2[:, 1:]\n\n grad = np.hstack((Theta_1_grad.ravel(), Theta_2_grad.ravel()))\n\n return j, grad", "def forward(self, y_pred: Dict[str, torch.Tensor], target: Union[torch.Tensor, rnn.PackedSequence]) -> torch.Tensor:\n # unpack\n if isinstance(target, rnn.PackedSequence):\n target, lengths = rnn.pad_packed_sequence(target, batch_first=True)\n # batch sizes reside on the CPU by default -> we need to bring them to GPU\n lengths = lengths.to(target.device)\n else:\n lengths = torch.ones(target.size(0), device=target.device, dtype=torch.long) * target.size(1)\n assert not target.requires_grad\n\n 
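        # a 3-dimensional target is treated as (value, weight) stacked on the last axis; the weight channel is split off below and used to rescale the per-element losses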
# calculate loss with \"none\" reduction\n if target.ndim == 3:\n weight = target[..., 1]\n target = target[..., 0]\n else:\n weight = None\n\n losses = self.loss(y_pred, target)\n # weight samples\n if weight is not None:\n losses = losses * weight.unsqueeze(-1)\n\n # mask loss\n mask = torch.arange(target.size(1), device=target.device).unsqueeze(0) >= lengths.unsqueeze(-1)\n if losses.ndim > 2:\n mask = mask.unsqueeze(-1)\n dim_normalizer = losses.size(-1)\n else:\n dim_normalizer = 1.0\n # reduce to one number\n if self.reduction == \"none\":\n loss = losses.masked_fill(mask, float(\"nan\"))\n else:\n if self.reduction == \"mean\":\n losses = losses.masked_fill(mask, 0.0)\n loss = losses.sum() / lengths.sum() / dim_normalizer\n elif self.reduction == \"sqrt-mean\":\n losses = losses.masked_fill(mask, 0.0)\n loss = losses.sum() / lengths.sum() / dim_normalizer\n loss = loss.sqrt()\n assert not torch.isnan(loss), (\n \"Loss should not be nan - i.e. something went wrong \"\n \"in calculating the loss (e.g. log of a negative number)\"\n )\n assert torch.isfinite(\n loss\n ), \"Loss should not be infinite - i.e. something went wrong (e.g. input is not in log space)\"\n return loss", "def backpropagation(\n self, X, y, h1_input, h1_output, h2_input, h2_output, final_output\n ):\n # back propagate loss per one data\n output_error_term = self.softmax_cross_entropy_loss(y, final_output)\n db3 = np.sum(output_error_term, axis=0)\n dW3 = np.dot(h2_output.T, output_error_term)\n\n h2_error_term = np.dot(\n output_error_term, self.W3.T\n ) * functions.relu_derivative(h2_input)\n db2 = np.sum(h2_error_term, axis=0)\n dW2 = np.dot(h1_output.T, h2_error_term)\n\n h1_error_term = np.dot(h2_error_term, self.W2.T) * functions.relu_derivative(\n h1_input\n )\n db1 = np.sum(h1_error_term, axis=0)\n dW1 = np.dot(X.T, h1_error_term)\n\n return dW1, db1, dW2, db2, dW3, db3", "def TEM_loss(anchors_action,anchors_startend,\n Y_action,Y_startend,config): \n loss_action,num_sample_action = binary_logistic_loss(Y_action,anchors_action)\n loss_startend,num_sample_startend = binary_logistic_loss(Y_startend,anchors_startend)\n # print('loss_action', loss_action)\n # print('loss_startend', loss_startend)\n loss={\"loss_action\":loss_action,\"num_sample_action\":num_sample_action,\n \"loss_startend\":loss_startend,\"num_sample_startend\":num_sample_startend}\n return loss", "def cross_entropy_loss(self, y, y_hat):\n if y.ndim == 1:\n batch_size = 1\n else:\n batch_size = y.shape[0]\n delta = 1e-7\n return -np.sum(y * np.log(y_hat + delta)) / batch_size", "def loss(self, X, y=None):\n W1 = self.params['W1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. 
#\n ############################################################################\n X, cache_conv = conv_forward(X, W1)\n X, x_relu1 = relu_forward(X)\n X, cache_maxpool = max_pool_forward(X, pool_param)\n N1,C1,H1,W1 = X.shape\n X = X.reshape(N1, C1 * H1 * W1)\n X, cache_fc2 = fc_forward(X, W2, b2)\n X, x_relu2 = relu_forward(X)\n X, cache_fc3 = fc_forward(X, W3, b3)\n scores = X\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. #\n ############################################################################\n loss, dx = softmax_loss(X, y)\n dx, dw, db = fc_backward(dx, cache_fc3)\n grads['W3'] = dw\n grads['b3'] = db\n dx = relu_backward(dx, x_relu2)\n dx, dw, db = fc_backward(dx, cache_fc2)\n grads['W2'] = dw\n grads['b2'] = db\n xx, Ind, pp = cache_maxpool\n N2,C2,H2,W2 = xx.shape\n H2 = int(H2/2)\n W2 = int(W2/2)\n dx = dx.reshape(N2,C2,H2,W2)\n dx = max_pool_backward(dx, cache_maxpool)\n dx = relu_backward(dx, x_relu1)\n dx, dw = conv_backward(dx, cache_conv)\n grads['W1'] = dw\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def cost(h, y):\n\tm = y.shape[0]\n\tcost = (-1/m) * (y.T @ np.log(h) + (1 - y).T @ np.log(1 - h))\n\treturn cost", "def nn_model(X, Y, n_h=4, epochs=10000, print_cost = False):\n \n # set a random seed to get the same results\n np.random.seed(3)\n \n # set size of n_x and n_y\n n_x = set_leyer_sizes(X, Y)[0] \n n_y = set_leyer_sizes(X, Y)[2] \n \n # initialize parameters \n parameters = initialize_parameters(n_x, n_h, n_y)\n \n # set up loop for forward/backward propagation and gradient descent\n \n for i in range(0, epochs):\n # forward propagation\n A2, cache = forward_propagation(X, parameters)\n \n # Calculate the cost\n cost = calc_cost(A2, Y, parameters)\n \n # Backward propagation\n gradients = backward_propagation(parameters, cache, X, Y)\n \n # Update parameters using gradiente descent\n parameters = update_parameters(parameters, gradients)\n \n # Print cost every 1000 iterations\n if print_cost and i % 1000 == 0:\n print(\"The cost after {0}: {1}\".format(i, cost))\n \n return parameters", "def loss(self, X, y=None, reg=0.0):\r\n Ws = self.weights\r\n bs = self.biases\r\n N, D = X.shape # number of samples, number of features per sample\r\n\r\n # Compute the forward pass\r\n self.activations = []\r\n for i in xrange(len(Ws)): # for each set of weights\r\n W,b = Ws[i], bs[i]\r\n if i == 0:\r\n H = np.dot(X,W) + b\r\n else:\r\n H = np.dot(self.activations[-1],W) + b\r\n if i < len(Ws) - 1: # if we're computing hidden activations, apply nonlinear function\r\n H = (H > 0) * (H) + (H < 0) * (H/100.0)\r\n self.activations.append(H)\r\n scores = self.activations[-1]\r\n \r\n # If there's no labels provided, stop here\r\n if y is None:\r\n return scores\r\n\r\n # Compute the loss\r\n exped_scores = np.exp(scores)\r\n sums = np.sum(exped_scores,axis=1)\r\n # softmax classifier loss\r\n 
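        # data_loss = -(1/N) * sum_i log( exp(s_{i, y_i}) / sum_j exp(s_{i, j}) )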
data_loss = (-1.0/N) * np.sum(np.log(exped_scores[range(N),y.astype(int)] / sums))\r\n\r\n # loss due to regularization\r\n reg_loss = 0\r\n for i in xrange(len(Ws)):\r\n reg_loss += np.sum(Ws[i]**2)\r\n reg_loss *= reg*(0.5)\r\n\r\n loss = data_loss + reg_loss\r\n \r\n # Compute gradients\r\n weights_grads = []\r\n biases_grads = []\r\n activation_grads = []\r\n for i in xrange(len(Ws)):\r\n weights_grads.append(np.copy(Ws[i]))\r\n biases_grads.append(np.copy(bs[i]))\r\n activation_grads.append(np.copy(self.activations[i]))\r\n\r\n DlossDscores = np.array(exped_scores / (N * np.matrix(sums).T))\r\n DlossDscores[range(N),y.astype(int)] -= (1.0/N)\r\n \r\n for i in xrange(len(Ws)-1,-1,-1):\r\n if i == 0:\r\n weights_grads[0] = np.dot(X.T, activation_grads[0]) + reg*Ws[0]\r\n biases_grads[0] = np.dot(np.ones((1,N)), activation_grads[0])[0]\r\n elif i == len(Ws)-1:\r\n H = self.activations[i-1]\r\n weights_grads[i] = np.dot(H.T, DlossDscores) + reg*Ws[i]\r\n biases_grads[i] = np.dot(np.ones((1,N)), DlossDscores)[0]\r\n dH = np.dot(DlossDscores, Ws[i].T)\r\n activation_grads[i-1] = dH\r\n else:\r\n H = self.activations[i-1]\r\n dH_out = activation_grads[i]\r\n weights_grads[i] = np.dot(H.T, dH_out) + reg*Ws[i]\r\n biases_grads[i] = np.dot(np.ones((1,N)), dH_out)[0]\r\n dH = np.dot(dH_out, Ws[i].T)\r\n dH = dH * (H > 0) + dH/100.0 * (H < 0)\r\n activation_grads[i-1] = dH\r\n \r\n grads = {}\r\n grads['weights'] = weights_grads\r\n grads['biases'] = biases_grads\r\n\r\n return loss, grads", "def loss(self, X, y=None):\n W1 = self.params['W1']\n mode = 'test' if y is None else 'train'\n\n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n cache = {}\n\n if self.use_batchnorm:\n for bn_param in self.bn_params:\n bn_param[mode] = mode\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. 
#\n ############################################################################\n input = X\n for l in xrange(1, self.conv_layers + 1):\n if self.use_batchnorm:\n W, b, gamma, beta = self.get_params_for_layer(l, get_gamma_beta=True)\n input, cache['cache%d' % l] = conv_norm_relu_pool_forward(input, W, b, conv_param, pool_param, gamma, beta, self.bn_params[l])\n else:\n W, b = self.get_params_for_layer(l)\n input, cache['cache%d' % l] = conv_relu_pool_forward(input, W, b, conv_param, pool_param)\n\n l = self.conv_layers + 1\n if self.use_batchnorm:\n W, b, gamma, beta = self.get_params_for_layer(l, get_gamma_beta=True)\n h_out, h_cache = affine_norm_relu_forward(input, W, b, gamma, beta, self.bn_params[l])\n else:\n W, b = self.get_params_for_layer(l)\n h_out, h_cache = affine_relu_forward(input, W, b)\n\n l = l + 1\n W, b = self.get_params_for_layer(l)\n scores, scores_cache = affine_forward(h_out, W, b)\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n ############################################################################\n loss, loss_dx = softmax_loss(scores, y)\n\n for l in xrange(1, self.num_layers + 1):\n loss += 0.5 * self.reg * np.sum(self.params['W%d' % l] * self.params['W%d' % l])\n\n l = self.num_layers\n scores_dx, scores_dw, scores_db = affine_backward(loss_dx, scores_cache)\n self.set_grads(l, grads, scores_dw, scores_db)\n l = l - 1\n\n if self.use_batchnorm:\n a_dx, a_dw, a_db, a_dgamma, a_dbeta = affine_norm_relu_backward(scores_dx, h_cache)\n self.set_grads(l, grads, a_dw, a_db, a_dgamma, a_dbeta)\n else:\n a_dx, a_dw, a_db = affine_relu_backward(scores_dx, h_cache)\n self.set_grads(l, grads, a_dw, a_db)\n l = l - 1\n\n conv_layers = l\n next_input = a_dx\n for l in xrange(conv_layers, 0, -1):\n current_cache = cache['cache%d' % l]\n if self.use_batchnorm:\n c_dx, c_dw, c_db, c_dgamma, c_dbeta = conv_norm_relu_pool_backward(next_input, current_cache)\n self.set_grads(l, grads, c_dw, c_db, c_dgamma, c_dbeta)\n else:\n c_dx, c_dw, c_db = conv_relu_pool_backward(next_input, current_cache)\n self.set_grads(l, grads, c_dw, c_db)\n next_input = c_dx\n\n for l in xrange(1, self.conv_layers + 3):\n grads['W%d' % l] += self.reg * self.params['W%d' % l]\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def cross_entropoy_loss_naive(W, X, y, reg):\n # pylint: disable=too-many-locals\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n ############################################################################\n # TODO: Compute the cross-entropy loss and its gradient using explicit #\n # loops. Store the loss in loss and the gradient in dW. If you are not #\n # careful here, it is easy to run into numeric instability. Don't forget #\n # the regularization! 
#\n ############################################################################\n num_train_sample = X.shape[0] #row of train data\n num_class = W.shape[1] #column of weight, plane,horse..\n for i in range(num_train_sample):\n p_score = X[i].dot(W) #a row of score corresponding to each class\n p_score -= np.max(p_score) #normalize, highest is 1\n\n ###compute softmax loss\n # sum of scores corresponding to different classes of a sample \n sum_score = np.sum(np.exp(p_score)) \n # each class's score over sum_score of a sample \n score_i = lambda k: np.exp(p_score[k]) / sum_score\n # for the correct label in each sample, find softmax loss over sum\n # iteration make loss sum up all samples\n loss = loss - np.log(score_i(y[i]))\n\n for k in range(num_class):\n p_k = score_i(k)\n # gradient of softmax\n dW[:, k] += (p_k - (k == y[i])) * X[i]\n\n loss /= num_train_sample\n loss += 0.5 * reg * np.sum(W * W)\n dW /= num_train_sample\n dW += reg*W\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, dW", "def loss_fn(model):\n with flax.nn.stateful(state) as new_state:\n with flax.nn.stochastic(prng_key):\n logits = model(batch['image'])\n loss = cross_entropy_loss(logits, batch['label'])\n # TODO(britefury): check if applying L2 regularization to weights but\n # *not* biases improves results\n weight_penalty_params = jax.tree_leaves(model.params)\n weight_l2 = sum([jnp.sum(x ** 2)\n for x in weight_penalty_params\n if x.ndim > 1])\n weight_penalty = l2_reg * 0.5 * weight_l2\n loss = loss + weight_penalty\n return loss, (new_state, logits)", "def pwcnet_loss(y, y_hat_pyr, opts):\n # Use a different norm based on the training mode we're in (training vs fine-tuning)\n norm_order = 2 if opts['loss_fn'] == 'loss_multiscale' else 1\n\n with tf.name_scope(opts['loss_fn']):\n total_loss = 0.\n _, gt_height, _, _ = tf.unstack(tf.shape(y))\n\n # Add individual pyramid level losses to the total loss\n for lvl in range(opts['pyr_lvls'] - opts['flow_pred_lvl'] + 1):\n _, lvl_height, lvl_width, _ = tf.unstack(tf.shape(y_hat_pyr[lvl]))\n\n # Scale the full-size groundtruth to the correct lower res level\n scaled_flow_gt = tf.image.resize_bilinear(y, (lvl_height, lvl_width))\n scaled_flow_gt /= tf.cast(gt_height / lvl_height, dtype=tf.float32)\n\n # Compute the norm of the difference between scaled groundtruth and prediction\n if opts['use_mixed_precision'] is False:\n y_hat_pyr_lvl = y_hat_pyr[lvl]\n else:\n y_hat_pyr_lvl = tf.cast(y_hat_pyr[lvl], dtype=tf.float32)\n norm = tf.norm(scaled_flow_gt - y_hat_pyr_lvl, ord=norm_order, axis=3)\n level_loss = tf.reduce_mean(tf.reduce_sum(norm, axis=(1, 2)))\n\n # Scale total loss contribution of the loss at each individual level\n total_loss += opts['alphas'][lvl] * tf.pow(level_loss + opts['epsilon'], opts['q'])\n\n return total_loss", "def compute_cost(self,X, y):\r\n num_examples = np.shape(X)[0]\r\n z = np.dot(X,self.theta) + self.bias\r\n exp_z = np.exp(z)\r\n softmax_scores = exp_z / np.sum(exp_z, axis=1, keepdims=True)\r\n \r\n one_hot_y = np.zeros((num_examples,np.max(y)+1))\r\n logloss = np.zeros((num_examples,)) \r\n for i in range(np.shape(X)[0]):\r\n one_hot_y[i,y[i]] = 1\r\n logloss[i] = -np.sum(np.log(softmax_scores[i,:]) * one_hot_y[i,:])\r\n data_loss = np.sum(logloss)\r\n return 1./num_examples * data_loss", "def loss(self, y_pred: Dict[str, torch.Tensor], target: torch.Tensor) -> torch.Tensor:\n raise 
NotImplementedError()", "def loss(self, X, y=None):\r\n mode = 'test' if y is None else 'train'\r\n\r\n if self.dropout_param is not None:\r\n self.dropout_param['mode'] = mode\r\n if self.use_batchnorm:\r\n for bn_param in self.bn_params:\r\n bn_param[mode] = mode\r\n\r\n\r\n W1, b1 = self.params['W1'], self.params['b1']\r\n W2, b2 = self.params['W2'], self.params['b2']\r\n W3, b3 = self.params['W3'], self.params['b3']\r\n gamma1, beta1 = self.params['gamma1'], self.params['beta1']\r\n gamma2, beta2 = self.params['gamma2'], self.params['beta2']\r\n # pass conv_param to the forward pass for the convolutional layer\r\n filter_size = W1.shape[2]\r\n conv_param = {'stride': 1, 'pad': int((filter_size - 1) / 2)}\r\n\r\n # pass pool_param to the forward pass for the max-pooling layer\r\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\r\n\r\n scores = None\r\n ############################################################################\r\n # TODO: Implement the forward pass for the three-layer convolutional net, #\r\n # computing the class scores for X and storing them in the scores #\r\n # variable. #\r\n ############################################################################\r\n alpha = 0.1\r\n csrp1, csrp1_cache = conv_sbn_lrelu_pool_forward(X, W1, b1, gamma1, beta1, self.bn_params[0], conv_param, pool_param, alpha)\r\n abr1, abr1_cache = affine_bn_lrelu_forward(csrp1, W2, b2, gamma2, beta2, self.bn_params[1], alpha)\r\n scores, out_cache = affine_forward(abr1, W3, b3)\r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r\n\r\n if y is None:\r\n return scores\r\n\r\n loss, grads = 0, {}\r\n ############################################################################\r\n # TODO: Implement the backward pass for the three-layer convolutional net, #\r\n # storing the loss and gradients in the loss and grads variables. Compute #\r\n # data loss using softmax, and make sure that grads[k] holds the gradients #\r\n # for self.params[k]. Don't forget to add L2 regularization! 
#\r\n ############################################################################\r\n loss, dp = softmax_loss(scores, y)\r\n loss += 0.5 * self.reg * np.sum(\r\n np.sum(W1 ** 2) + np.sum(W2 ** 2) + np.sum(W3 ** 2)\r\n )\r\n dp, dw3, db3 = affine_backward(dp, out_cache)\r\n dp, dw2, db2, dgamma2, dbeta2 = affine_bn_lrelu_backward(dp, abr1_cache)\r\n dp, dw1, db1, dgamma1, dbeta1 = conv_sbn_lrelu_pool_backward(dp, csrp1_cache)\r\n grads['W1'] = dw1 + self.reg * W1\r\n grads['W2'] = dw2 + self.reg * W2\r\n grads['W3'] = dw3 + self.reg * W3\r\n grads['b1'] = db1\r\n grads['b2'] = db2\r\n grads['b3'] = db3\r\n grads['gamma2'] = dgamma2\r\n grads['gamma1'] = dgamma1\r\n grads['beta2'] = dbeta2\r\n grads['beta1'] = dbeta1\r\n \r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r\n\r\n return loss, grads", "def loss(self, X, y=None, reg=0.0):\n\n self.layers = []\n layers = self.layers\n layers.append(X)\n\n # Unpack variables from the params dictionary\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n N, D = X.shape\n H, C = W2.shape\n\n # Compute the forward pass\n scores = None\n #############################################################################\n # TODO: Perform the forward pass, computing the class scores for the input. #\n # Store the result in the scores variable, which should be an array of #\n # shape (N, C). #\n #############################################################################\n mid = np.maximum(0, X.dot(W1) + b1.reshape(1, -1)) # activation\n scores = mid.dot(W2) + b2.reshape(1, -1)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n # If the targets are not given then jump out, we're done\n if y is None:\n return scores\n\n # Compute the loss\n loss = None\n #############################################################################\n # TODO: Finish the forward pass, and compute the loss. This should include #\n # both the data loss and L2 regularization for W1 and W2. Store the result #\n # in the variable loss, which should be a scalar. Use the Softmax #\n # classifier loss. So that your results match ours, multiply the #\n # regularization loss by 0.5 #\n #############################################################################\n exp_score = np.exp(scores)\n exp_score_sum = exp_score.sum(axis=1)\n correct_score = exp_score[np.arange(N), y]\n probability = (correct_score / exp_score_sum).reshape(-1, 1)\n loss = -np.log(probability).sum()\n\n loss /= N\n loss += 0.5 * reg * (np.sum(W1 * W1) + np.sum(W2 * W2))\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n # Backward pass: compute gradients\n grads = {}\n #############################################################################\n # TODO: Compute the backward pass, computing the derivatives of the weights #\n # and biases. Store the results in the grads dictionary. 
For example, #\n # grads['W1'] should store the gradient on W1, and be a matrix of same size #\n #############################################################################\n des = np.tile((-correct_score / np.square(exp_score_sum)).reshape(-1, 1), (1, C))\n des[np.arange(N), y] += 1.0 / exp_score_sum\n dsoftmax = des * (-np.ones((mid.shape[0], 1)) / probability) * np.exp(scores)\n\n # W2\n grads['W2'] = mid.T.dot(dsoftmax)\n grads['W2'] /= N\n grads['W2'] += reg * W2\n\n # b2\n grads['b2'] = np.ones_like(b2.reshape(1, -1)) * dsoftmax\n grads['b2'] = np.mean(grads['b2'], axis=0).reshape(-1)\n\n # W1\n binary = np.zeros_like(mid)\n binary[mid > 0] = 1\n grads['W1'] = X.T.dot(binary * dsoftmax.dot(W2.T)) # chain rule, compute dmid/dW1 * dscore/dmid * dsoftmax\n grads['W1'] /= N\n grads['W1'] += reg * W1\n\n # b1\n grads['b1'] = np.ones_like(b1.reshape(1, -1)) * binary * dsoftmax.dot(W2.T)\n grads['b1'] = np.mean(grads['b1'], axis=0).reshape(-1)\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, grads", "def output_loss_and_grads(self, h, V, c, y):\n\n loss, dh, dV, dc = 0.0, [], np.zeros_like(self.V), np.zeros_like(self.c)\n # calculate the output (o) - unnormalized log probabilities of classes\n # calculate yhat - softmax of the output\n # calculate the cross-entropy loss\n # calculate the derivative of the cross-entropy softmax loss with respect to the output (o)\n # calculate the gradients with respect to the output parameters V and c\n # calculate the gradients with respect to the hidden layer h\n for t in range(self.sequence_length):\n hp = h[:, t, :] # BS x H\n #o = self.output(hp, V, c) # leng x BS\n o = self.output(hp, V, c) # BS x leng\n #exp = np.exp(o) # leng x BS\n exp = np.exp(o) # BS x leng\n #s = exp / np.sum(exp, axis=0, keepdims=True) # leng x BS\n s = exp / np.sum(exp, axis=1, keepdims=True) # BS x leng\n yp = y[:, t, :]\n #dO = s - yp # leng x BS\n dO = s - yp # BS x leng\n #dV += np.dot(dO, hp.T) # ( leng x BS ) * ( H x BS ).T = leng x H\n dV += np.dot(hp.T, dO) # ( BS x H ).T * ( BS x leng ) = H x leng\n #dc += np.sum(dO, axis=1).reshape([-1, 1]) #\n dc += np.sum(dO, axis=0).reshape([1, -1]) #\n #dh.append(np.dot(self.V.T, dO)) # ( leng x H ).T * ( leng x BS ) = ( BS x H )\n dh.append(np.dot(dO, self.V.T)) # ( BS x leng ) * ( H x leng ).T = ( BS x H )\n loss += -np.sum(np.log(s)*yp)\n return loss, np.array(dh), dV, dc", "def softmax_cross_entropy_loss(self, y, y_hat):\n batch_size = y.shape[0]\n return -(y - y_hat) / batch_size", "def calculate_loss(model, t, logits, labels):\n model_para = model.get_paramaters_list_reshape()\n myTF.calculate_para_dependence_loss(model_para,t)\n\n myTF.calculate_cross_entropy_loss(logits, labels)\n\n return tf.add_n(tf.get_collection('losses'), name='loss_total')", "def compute_loss(\n self,\n x: torch.Tensor,\n y: torch.Tensor,\n y_hat: torch.Tensor,\n extras: List[torch.Tensor],\n train_context: TrainContext,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:", "def loss_fn(y_true,y_pred): \n loss = tf.nn.softmax_cross_entropy_with_logits_v2(y_true,\n y_pred,\n axis=-1,\n )\n loss = tf.reduce_mean(loss,name=\"loss\")\n return loss", "def loss(self, X, y=None, justLoss=False):\n # N = X.shape[0]\n # mode = 'test' if y is None else 'train'\n scores = None\n\n W1, b1 = self.params['W1'], self.params['b1']\n # W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], 
self.params['b3']\n\n\n conv_param = {'stride': 1, 'pad': 0}\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n #######################################################################\n # TODO: Implement the forward pass for the convolutional neural net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n #######################################################################\n\n conv1, conv_cache = conv_forward(X, W1, b1, conv_param)\n relu1, relu_cache1 = relu_forward(conv1)\n\n # conv2, conv_cache2 = conv_forward(relu1, W2, b2, conv_param)\n # relu2, relu_cache2 = relu_forward(conv2)\n\n scores, maxpool_cache = max_pool_forward(relu1, pool_param)\n scores, forward_cache = fc_forward(scores, W3, b3)\n \n\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n #######################################################################\n # TODO: Implement the backward pass for the convolutional neural net, #\n # storing the loss and gradients in the loss and grads variables. #\n # Compute data loss using softmax, and make sure that grads[k] holds #\n # the gradients for self.params[k]. #\n loss, dscores = softmax_loss(scores, y)\n\n if justLoss:\n return loss\n # print(loss)\n\n\n dx_3, grads['W3'], grads['b3'] = fc_backward(dscores, forward_cache)\n dx_3 = max_pool_backward(dx_3, maxpool_cache)\n\n # dx_2 = relu_backward(dx_3, relu_cache2)\n # dx_2, grads['W2'], grads['b2'] = conv_backward(dx_3, conv_cache2)\n\n dx = relu_backward(dx_3, relu_cache1)\n dx, grads['W1'], grads['b1'] = conv_backward(dx, conv_cache)\n \n \n\n return loss, grads", "def forward(self, y, h):\n y = y.transpose(1, 0)\n\n t = self.linear_in(h)\n target = self.linear_in(h).unsqueeze(2) # batch x dim x 1\n\n # Get attention\n attn = torch.bmm(y, target).squeeze(2) # batch x T\n attn = F.softmax(attn, dim=1)\n attn3 = attn.view(attn.size(0), 1, attn.size(1)) # batch x 1 x T\n\n weighted_y = torch.bmm(attn3, y).squeeze(1) # batch x dim\n h_tilde = torch.cat((weighted_y, h), 1)\n\n h_tilde = torch.tanh(self.linear_out(h_tilde))\n\n return h_tilde, attn", "def loss(self, X, labels):\n features = self.get_conv_features(X)\n loss = blah\n return loss", "def forcing_binary_loss(h):\n n_batch = h.data.shape[0]\n n_units = h.data.shape[1]\n\n loss = -1 * F.sum((h - 0.5) ** 2) / (n_units * n_batch)\n return loss", "def output_layer(self, h_, labels_):\n with tf.name_scope(\"Output_Layer\"):\n self.W_out_ = tf.get_variable(\"W_out\", shape=[h_.get_shape()[1].value,len(self.embed[0])], initializer=tf.random_normal_initializer())\n self.b_out_ = tf.get_variable(\"b_out\", shape=[len(self.embed[0])], initializer=tf.zeros_initializer())\n self.logits_ = tf.add(tf.matmul(h_,self.W_out_),self.b_out_)\n self.activated_out_ = tf.tanh(self.logits_) \n\n with tf.name_scope(\"Loss\"):\n self.loss_ = tf.reduce_sum(tf.square(tf.norm(tf.subtract(labels_, self.activated_out_),axis=1)))\n self.optimizer_ = tf.train.AdamOptimizer(learning_rate = self.learning_rate_)\n gradients_, variables_ = zip(*self.optimizer_.compute_gradients(self.loss_))\n clipped_grads_, _ = tf.clip_by_global_norm(gradients_, self.max_grad_norm_)\n self.train_step_ = self.optimizer_.apply_gradients(zip(clipped_grads_,variables_))", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = 
{'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n cnn_out, cnn_cache = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)\n hidden_out, hidden_cache = affine_relu_forward(cnn_out, W2, b2)\n scores, scores_cache = affine_forward(hidden_out, W3, b3)\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n ############################################################################\n\n # Compute loss and gradients\n loss, dscores = softmax_loss(scores, y)\n dhidden, grads['W3'], grads['b3'] = affine_backward(dscores, scores_cache)\n dcnn, grads['W2'], grads['b2'] = affine_relu_backward(dhidden, hidden_cache)\n dX, grads['W1'], grads['b1'] = conv_relu_pool_backward(dcnn, cnn_cache)\n\n # Regularization\n loss = loss + 0.5*self.reg*np.sum(self.params['W3']**2)\n loss = loss + 0.5*self.reg*np.sum(self.params['W2']**2)\n loss = loss + 0.5*self.reg*np.sum(self.params['W1']**2)\n grads['W3'] = grads['W3'] + self.reg * self.params['W3']\n grads['W2'] = grads['W2'] + self.reg * self.params['W2']\n grads['W1'] = grads['W1'] + self.reg * self.params['W1']\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def loss(self, X, y=None):\n\t\tmode = 'test' if y is None else 'train'\n\t\tif self.dropout_param is not None:\n\t\t\tself.dropout_param['mode'] = mode\n\t\tif self.use_batchnorm:\n\t\t\tfor bn_param in self.bn_params:\n\t\t\t\tbn_param[mode] = mode\n\n\t\tW1, b1 = self.params['W1'], self.params['b1']\n\t\tW2, b2 = self.params['W2'], self.params['b2']\n\t\tW3, b3 = self.params['W3'], self.params['b3']\n\t\tW5, b5 = self.params['W5'], self.params['b5']\n\t\t\n\t\tgamma1, beta1 = self.params['gamma1'], self.params['beta1']\n\t\tgamma2, beta2 = self.params['gamma2'], self.params['beta2']\n\t\tgamma3, beta3 = self.params['gamma3'], self.params['beta3']\t\n\n\t\t# pass conv_param to the forward pass for the convolutional layer\n\t\tfilter_size1 = W1.shape[2]\n\t\tconv_param1 = {'stride': 1, 'pad': (filter_size1 - 1) / 2}\n\t\tfilter_size2 = W2.shape[2]\n\t\tconv_param2 = {'stride': 1, 'pad': (filter_size2 - 1) / 2}\n\t\t\n\t\t# pass pool_param to the forward pass for the max-pooling layer\n\t\tpool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\t\t\n\t\tscores = None\n\t\n\t\t# Convolutional layers\t\n\t\tz1, cache1 = conv_relu_forward(X, W1, b1, conv_param1)\n\t\tz2, cache2 = conv_relu_pool_forward(z1, W2, b2, 
conv_param2, pool_param)\n\t\tz3, cache3 = spatial_batchnorm_forward(z2, gamma1, beta1, self.bn_params[1])\n\n\t\t# Fully Connected layers\n\t\tz4, cache4 = affine_relu_bn_forward(z3, W3, b3, gamma2, beta2, self.bn_params[2])\n\t\tz4, cache9 = dropout_forward(z4, self.dropout_params)\n\n\t\t# Output layer\n\t\tz6, cache6 = affine_forward(z4, W5, b5)\n\t\tz7, cache7 = batchnorm_forward(z6, gamma3, beta3, self.bn_params[3])\n\t\t#z8, cache8 = dropout_forward(z7, self.dropout_params)\n\t\tscores = z7\n\t\t\n\t\tif y is None:\n\t\t\treturn scores\n\t\t\n\t\tloss, grads = 0, {}\n\t\tloss, dout = softmax_loss(scores, y)\n\t\tloss += self.reg * 0.5 * (np.power(self.params['W1'], 2).sum() +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W2'], 2).sum() +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W5'], 2).sum() +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W3'], 2).sum())\n\t\t\n\t\t#dx8 = dropout_backward(dout, cache8)\n\t\tdx7, grads['gamma3'], grads['beta3'] = batchnorm_backward(dout, cache7)\n\t\tdx6, grads['W5'], grads['b5'] = affine_backward(dx7, cache6)\n\t\tdx6 = dropout_backward(dx6, cache9)\n\t\tdx4, grads['W3'], grads['b3'], grads['gamma2'], grads['beta2'] = affine_relu_bn_backward(dx6, cache4)\n\t\t\n\t\tdx3, grads['gamma1'], grads['beta1'] = spatial_batchnorm_backward(dx4, cache3)\n\t\tdx2, grads['W2'], grads['b2'] = conv_relu_pool_backward(dx3, cache2)\n\t\tdx1, grads['W1'], grads['b1'] = conv_relu_backward(dx2, cache1)\n\t\t\n\t\treturn loss, grads", "def kl_loss(y_true, y_pred):\n #y_true = tf.constant(y_true, dtype=tf.dtypes.float64)\n #y_pred = tf.constant(y_pred, dtype=tf.dtypes.float64)\n #y_true = tf.reshape(y_true, shape=[y_true.shape[0], y_true.shape[1] * y_true.shape[2]])\n #y_pred = tf.reshape(y_pred, shape=[y_pred.shape[0], y_pred.shape[1] * y_pred.shape[2]])\n loss = 0\n if y_true.shape[-1] is None:\n return 0.0\n for i in range(y_true.shape[-1]):\n hist_true = np.histogram(y_true[:,i], bins=10, density=True)\n hist_pred = np.histogram(y_pred[:,i], bins=10, density=True)\n loss += kl(hist_true[0], hist_pred[0])\n return loss", "def _generator_loss(self, y_hat):\n\n l = -tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = tf.zeros(tf.shape(y_hat)),logits = y_hat ))\n print('generatorloss shape',tf.shape(l))\n return l", "def heteroscedastic_loss(y_true, y_mean, y_var, **args):\n\n logvar = tf.reduce_sum(input_tensor=0.5 * tf.math.log(y_var), axis=-1)\n squared_error = tf.reduce_sum(input_tensor=0.5 * tf.square(y_true - y_mean) / y_var, axis=-1)\n loss = tf.reduce_mean(input_tensor=squared_error + logvar)\n return loss", "def loss(self, X, y=None):\n\t\tW1, b1 = self.params['W1'], self.params['b1']\n\t\tW2, b2 = self.params['W2'], self.params['b2']\n\t\tW3, b3 = self.params['W3'], self.params['b3']\n\t\t\n\t\t# pass conv_param to the forward pass for the convolutional layer\n\t\tfilter_size = W1.shape[2]\n\t\tconv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n\t\t# pass pool_param to the forward pass for the max-pooling layer\n\t\tpool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\t\tscores = None\n\t\t############################################################################\n\t\t# TODO: Implement the forward pass for the three-layer convolutional net, #\n\t\t# computing the class scores for X and storing them in the scores\t\t\t\t\t #\n\t\t# variable.\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 
#\n\t\t############################################################################\n\t\tz1, cache1 = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)\n\t\tz2, cache2 = affine_relu_forward(z1, W2, b2)\n\t\ty3, cache3 = affine_forward(z2, W3, b3)\n\t\tscores = y3\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\tif y is None:\n\t\t\treturn scores\n\t\t\n\t\tloss, grads = 0, {}\n\t\t############################################################################\n\t\t# TODO: Implement the backward pass for the three-layer convolutional net, #\n\t\t# storing the loss and gradients in the loss and grads variables. Compute #\n\t\t# data loss using softmax, and make sure that grads[k] holds the gradients #\n\t\t# for self.params[k]. Don't forget to add L2 regularization!\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tloss, dout = softmax_loss(scores, y)\n\t\tloss += self.reg * 0.5 * (np.power(self.params['W3'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W2'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W1'], 2).sum())\n\n\t\tdx3, grads['W3'], grads['b3'] = affine_backward(dout, cache3)\n\t\tdx2, grads['W2'], grads['b2'] = affine_relu_backward(dx3, cache2)\n\t\tdx1, grads['W1'], grads['b1'] = conv_relu_pool_backward(dx2, cache1)\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\treturn loss, grads", "def loss_labels(self, outputs, targets, indices, num_boxes):\n\n assert 'pred_logits' in outputs\n src_logits = outputs['pred_logits']\n\n idx = self._get_src_permutation_idx(indices)\n # gt idx for each query matched , len is batch size\n tgt_idx = [i[torch.where(i >= 0)] for i in indices]\n target_classes_o = torch.cat([t['labels'][j] for t, j in zip(targets, tgt_idx)])\n # [bs, query]\n target_classes = torch.zeros(src_logits.shape[:2], dtype=torch.int64, device=src_logits.device)\n\n target_classes[idx] = target_classes_o\n ignore_idx = self._get_tgt_permutation_idx(indices)\n\n loss_ce = sigmoid_focal_loss(src_logits, target_classes.unsqueeze(-1).float(), num_boxes, alpha=self.focal_alpha, gamma=self.gammma, ignore=ignore_idx) * src_logits.shape[1]\n\n losses = {'loss_ce': loss_ce}\n\n return losses", "def cross_entropy(y, y_hat):\n return -tf.math.log(\n tf.gather_nd(y_hat, tf.reshape(y, (-1, 1)), batch_dims=1)\n )", "def get_loss(y_):\n m_ = tf_lab_q.dequeue_many(BATCH_SIZE)\n y_m_ = tf.mul(y_, ones_)\n y_diff_ = tf.sub(y_m_, tf.transpose(y_m_))\n t_1_ = -tf.mul(0.95*ones_, y_diff_)\n t_2_ = tf.log(ones_ + tf.exp(y_diff_))\n sum_ = tf.add(t_1_, t_2_)\n mult_sum_ = tf.mul(m_, sum_)\n loss_ = tf.reduce_sum(mult_sum_) / tf.reduce_sum(m_)\n return loss_, m_", "def cross_entropoy_loss_naive(W, X, y, reg):\n # pylint: disable=too-many-locals\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n \n\n C = W.shape[1]\n# print(\"no. of classes {}\".format(C))\n N,D = X.shape\n# print(\"no. 
of data {} and dimension {}\".format(N,D))\n for i in range(N):\n xi = X[i,:]\n# print(\"one record shape: {}\".format(xi.shape))\n scores = np.zeros(C)\n for c in range(C):\n w = W[:,c]\n# print(\"weight for one record {}\".format(w.shape))\n scores[c] = xi.dot(w)\n scores -= np.max(scores)\n actual_y = y[i]\n total_score = np.sum(np.exp(scores)) \n loss_i = -scores[actual_y] + np.log(total_score)\n# print('naive score : {}'.format(scores[actual_y]))\n loss += loss_i\n \n #gradient\n probability = np.exp(scores)/total_score\n for j in range(C):\n dW[:,j] += probability[j]*xi\n \n dW[:,actual_y] -= xi\n loss = loss/N\n reg_loss = 0.5*reg*np.sum(W*W)\n loss = loss + reg_loss\n print(\"loss : {}\".format(loss))\n dW = dW/N\n dW += reg*W\n \n \n \n \n\n ############################################################################\n # TODO: Compute the cross-entropy loss and its gradient using explicit #\n # loops. Store the loss in loss and the gradient in dW. If you are not #\n # careful here, it is easy to run into numeric instability. Don't forget #\n # the regularization! #\n ############################################################################\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, dW", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n \n N, C, H, W = X.shape;\n\n #print 'X shape = ' + str(X.shape);\n\n # Get conv layer output. Note that it is not 2-dimensional \n # conv - relu - 2x2 maxpool\n v1, cache1 = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param);\n\n #print 'v1 shape = ' + str(v1.shape);\n\n # Reshape to 2D\n v1shape = v1.shape; # Used to reshape back to original form in backward pass\n v1 = np.reshape(v1,(N,-1));\n #print 'v1 shape = ' + str(v1.shape);\n\n # Feed forward to hidden layer (affine-relu)\n v2, cache2 = affine_relu_forward(v1, W2, b2);\n #print 'v2 shape = ' + str(v2.shape);\n\n # Feed forward to final layer (affine only)\n v3, cache3 = affine_forward(v2, W3, b3)\n #print 'v3 shape = ' + str(v3.shape);\n\n # Compute scores\n scores = v3;\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n \n # Calculate softmax loss from layer 2 output\n # Loss gets regularized here\n # Each separate gradient must be regularized later when calculated\n loss, dv3 = softmax_loss(scores,y); # Softmax loss and gradient\n #print 'dv3 shape = ' + str(dv3.shape);\n reg = self.reg;\n loss += 0.5 * reg * (np.sum(W1*W1) + np.sum(W2*W2) + np.sum(W3*W3)); # Regularize\n\n # Do backward pass through layer 2 affine\n dv2, dw3, db3 = affine_backward(dv3, cache3);\n dw3 += reg*W3; # Regularize\n #print 'dv2 shape = ' + str(dv2.shape);\n\n\n # Backward pass through hidden layer\n dv1, dw2, db2 = affine_relu_backward(dv2, cache2);\n dw2 += reg*W2; # Regularize\n #print 'dv1 shape = ' + str(dv1.shape);\n\n # Reshape dv1 to be compatible with convolutional layer\n dv1 = np.reshape(dv1,v1shape);\n #print 'dv1 shape = ' + str(dv1.shape);\n\n # Do backward pass through convolutional layer\n dx, dw1, db1 = conv_relu_pool_backward(dv1, cache1);\n dw1 += reg*W1; # Regularize\n\n # Store all weight and bias gradients in grads\n grads['W1'] = dw1; grads['b1'] = db1;\n grads['W2'] = dw2; grads['b2'] = db2;\n grads['W3'] = dw3; grads['b3'] = db3;\n\n\n\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def loss(self, features: Tensor, tags: Tensor, masks: Tensor):\n \n features = self.linear(features)\n T = features.size(1)\n masks_ = masks[:, :T].float()\n forward_score = self.forward_algorithm(features, masks_)\n gold_score = self._score(features, tags[:, :T].long(), masks_)\n loss = (forward_score - gold_score).mean()\n return loss", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n # Set train/test mode for batchnorm params and dropout param since they\n # behave differently during training and testing.\n if self.use_dropout:\n self.dropout_param['mode'] = mode\n if self.normalization=='batchnorm':\n for bn_param in self.bn_params:\n bn_param['mode'] = mode\n ############################################################################\n # TODO: Implement the forward pass for the fully-connected net, computing #\n # the class scores for X and storing them in the scores variable. #\n # #\n # When using dropout, you'll need to pass self.dropout_param to each #\n # dropout forward pass. #\n # #\n # When using batch normalization, you'll need to pass self.bn_params[0] to #\n # the forward pass for the first batch normalization layer, pass #\n # self.bn_params[1] to the forward pass for the second batch normalization #\n # layer, etc. 
#\n ############################################################################\n cache = {} # 需要存储反向传播需要的参数\n cache_dropout = {}\n hidden = X\n for i in range(self.num_layers - 1):\n if self.normalization == 'batchnorm':\n hidden,cache[i+1] = affine_bn_relu_forward(hidden,\n self.params['W' + str(i+1)],\n self.params['b' + str(i+1)],\n self.params['gamma' + str(i+1)],\n self.params['beta' + str(i+1)],\n self.bn_params[i])\n elif self.normalization == 'layernorm':\n hidden, cache[i + 1] = affine_ln_relu_forward(hidden,\n self.params['W' + str(i + 1)],\n self.params['b' + str(i + 1)],\n self.params['gamma' + str(i + 1)],\n self.params['beta' + str(i + 1)],\n self.bn_params[i])\n else:\n hidden , cache[i+1] = affine_relu_forward(hidden,self.params['W' + str(i+1)],\n self.params['b' + str(i+1)])\n if self.use_dropout:\n hidden , cache_dropout[i+1] = dropout_forward(hidden,self.dropout_param)\n # 最后一层不用激活层\n scores, cache[self.num_layers] = affine_forward(hidden , self.params['W' + str(self.num_layers)],\n self.params['b' + str(self.num_layers)])\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n ############################################################################\n # TODO: Implement the backward pass for the fully-connected net. Store the #\n # loss in the loss variable and gradients in the grads dictionary. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n # #\n # When using batch/layer normalization, you don't need to regularize the scale #\n # and shift parameters. #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. 
#\n ############################################################################\n loss, grads = 0.0, {}\n loss, dS = softmax_loss(scores , y)\n # 最后一层没有relu激活层\n dhidden, grads['W' + str(self.num_layers)], grads['b' + str(self.num_layers)] \\\n = affine_backward(dS,cache[self.num_layers])\n loss += 0.5 * self.reg * np.sum(self.params['W' + str(self.num_layers)] * self.params['W' + str(self.num_layers)])\n grads['W' + str(self.num_layers)] += self.reg * self.params['W' + str(self.num_layers)]\n\n for i in range(self.num_layers - 1, 0, -1):\n loss += 0.5 * self.reg * np.sum(self.params[\"W\" + str(i)] * self.params[\"W\" + str(i)])\n # 倒着求梯度\n if self.use_dropout:\n dhidden = dropout_backward(dhidden,cache_dropout[i])\n if self.normalization == 'batchnorm':\n dhidden, dw, db, dgamma, dbeta = affine_bn_relu_backward(dhidden, cache[i])\n grads['gamma' + str(i)] = dgamma\n grads['beta' + str(i)] = dbeta\n elif self.normalization == 'layernorm':\n dhidden, dw, db, dgamma, dbeta = affine_ln_relu_backward(dhidden, cache[i])\n grads['gamma' + str(i)] = dgamma\n grads['beta' + str(i)] = dbeta\n else:\n dhidden, dw, db = affine_relu_backward(dhidden, cache[i])\n grads['W' + str(i)] = dw + self.reg * self.params['W' + str(i)]\n grads['b' + str(i)] = db\n return loss, grads", "def setup_loss(logits, labels):\n predictions = tf.nn.softmax(logits)\n cost = tf.losses.softmax_cross_entropy(onehot_labels=labels,\n logits=logits,\n )\n return predictions, cost", "def softmax_cross_entropy(y, label):\r\n losses = np.sum((- np.log(y + g_epsilon) * label), axis=1)\r\n return losses\r\n pass", "def loss(self, X, y=None):\r\n\r\n # Findout if it's trainig or test time\r\n mode = 'train'\r\n if y is None:\r\n mode = 'test'\r\n\r\n # Set the mode for batch normalization and dropout parameters if needed.\r\n if self.use_batch_norm:\r\n for bn_param in self.bn_params:\r\n bn_param['mode'] = mode\r\n if self.use_dropout:\r\n self.dropout_params['mode'] = mode\r\n\r\n # Compute the forward pass fo the cnn.\r\n caches = []\r\n input_layer = X\r\n for i in range(1, self.num_conv_layers+1):\r\n w = self.params['W{}'.format(i)]\r\n b = self.params['b{}'.format(i)]\r\n\r\n if self.use_batch_norm:\r\n gamma = self.params['gamma{}'.format(i)]\r\n beta = self.params['beta{}'.format(i)]\r\n layer_score, layer_cache = conv_bn_relu_pool_forward(input_layer, w, b, gamma, beta,\r\n self.conv_params, self.bn_params[i-1], \r\n self.pool_params)\r\n else:\r\n layer_score, layer_cache = conv_relu_pool_forward(input_layer, w, b, self.conv_params, \r\n self.pool_params)\r\n input_layer = layer_score\r\n caches.append(layer_cache)\r\n\r\n # Compute the forward pass for the fully connected net.\r\n num_layers = self.num_conv_layers + self.num_hidden_layers\r\n for i in range(self.num_conv_layers+1, num_layers+1):\r\n w = self.params['W{}'.format(i)]\r\n b = self.params['b{}'.format(i)]\r\n if self.use_batch_norm:\r\n gamma = self.params['gamma{}'.format(i)]\r\n beta = self.params['beta{}'.format(i)]\r\n layer_score, layer_cache = affine_bn_relu_forward(input_layer, w, b, gamma, beta,\r\n self.bn_params[i-1],\r\n dropout=self.use_dropout, \r\n dropout_param=self.dropout_params)\r\n else:\r\n layer_score, layer_cache = affine_relu_forward(input_layer, w, b, dropout=self.use_dropout, \r\n dropout_param=self.dropout_params)\r\n input_layer = layer_score\r\n caches.append(layer_cache)\r\n\r\n # Compute the forward pass for the output layer.\r\n w = self.params['W{}'.format(i+1)]\r\n b = self.params['b{}'.format(i+1)]\r\n scores, output_cache = 
affine_forward(input_layer, w, b)\r\n\r\n # If testing time return the scores\r\n if mode == 'test':\r\n return scores\r\n\r\n # Compute the loss\r\n loss, dscores = softmax_loss(scores, y)\r\n\r\n # Add regularization to the loss and the corresponding gradient.\r\n grads = {}\r\n for i in range(1, num_layers+2):\r\n w = 'W{}'.format(i)\r\n loss += 0.5 * self.reg * np.sum(self.params[w]**2)\r\n grads[w] = self.reg * self.params[w]\r\n\r\n # Compute the gradients using backprop on the fully connected net.\r\n # Start with the output layer\r\n w = 'W{}'.format(num_layers+1)\r\n b = 'b{}'.format(num_layers+1)\r\n dx, dw, db = affine_backward(dscores, output_cache)\r\n grads[w] += dw\r\n grads[b] = db\r\n for i in range(num_layers, self.num_conv_layers, -1):\r\n cache = caches[i-1]\r\n w = 'W{}'.format(i)\r\n b = 'b{}'.format(i)\r\n if self.use_batch_norm:\r\n gamma = 'gamma{}'.format(i)\r\n beta = 'beta{}'.format(i)\r\n dx, dw, db, dgamma, dbeta = affine_bn_relu_backward(dx, cache, self.use_dropout)\r\n grads[gamma] = dgamma\r\n grads[beta] = dbeta\r\n else:\r\n dx, dw, db = affine_relu_backward(dx, cache)\r\n grads[w] += dw\r\n grads[b] = db\r\n\r\n # Compute the gradeints using backprop on the convolutional layers.\r\n for i in range(self.num_conv_layers, 0, -1):\r\n cache = caches[i-1]\r\n w = 'W{}'.format(i)\r\n b = 'b{}'.format(i)\r\n if self.use_batch_norm:\r\n gamma = 'gamma{}'.format(i)\r\n beta = 'beta{}'.format(i)\r\n dx, dw, db, dgamma, dbeta = conv_bn_relu_pool_backward(dx, cache)\r\n grads[gamma] = dgamma\r\n grads[beta] = dbeta\r\n else:\r\n dx, dw, db = conv_relu_pool_backward(dx, cache)\r\n grads[w] += dw\r\n grads[b] = db\r\n\r\n return loss, grads", "def compute_loss(self, text, mel_in, mel_target):\n y, ylogit , a = self.forward(text, mel_in)\n\n # compute loss\n l1_loss = F.mse_loss(y, mel_target)\n bin_loss = F.binary_cross_entropy_with_logits(ylogit, mel_target)\n # now attention loss\n N = text.shape[-1]\n T = mel_in.shape[-1]\n def w_fun(n, t):\n return 1 - np.exp(-((n/(N-1) - t/(T-1))**2) / (2 * self.hp.g**2))\n w = np.fromfunction(w_fun, (a.shape[1], T), dtype='f')\n w = torch.from_numpy(w).to(self.device)\n loss_att = (w * a[:, :, :T]).mean()\n loss = l1_loss + bin_loss + loss_att\n return loss, y, a", "def yolo_layer_loss(y_true, anchors, yolo_outputs, grid_shapes, input_shape, num_classes, batch_size, ignore_thresh):\n batch_size_float = K.cast(batch_size, K.dtype(yolo_outputs[0]))\n\n # Get object mask to filter out boxes with score 0\n object_mask = y_true[..., 4:5] # the class scores (always 1 in training)\n object_mask_bool = K.cast(object_mask, 'bool') # convert to boolean\n\n # Apply yolo head to get the prediction\n grid, raw_prediction, prediction_xy, prediction_wh = yolo_head(yolo_outputs, anchors, num_classes,\n input_shape, calc_loss=True)\n prediction_box = K.concatenate([prediction_xy, prediction_wh])\n\n # Darknet raw box to calculate loss.\n raw_true_xy = y_true[..., :2] * grid_shapes[::-1] - grid\n\n raw_true_wh = K.log(y_true[..., 2:4] / anchors * input_shape[::-1])\n raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh)) # avoid log(0)=-inf\n\n # = 2 - true box size\n box_loss_scale = 2 - y_true[..., 2:3] * y_true[..., 3:4]\n\n # Compute filter mask to ignore boxes whose iou is below the threshold\n filter_mask = filter_by_iou(y_true, object_mask_bool, prediction_box, ignore_thresh, batch_size)\n\n # Compute losses\n xy_loss, wh_loss = compute_box_loss(object_mask, raw_true_xy, raw_true_wh,\n raw_prediction, box_loss_scale, 
batch_size_float)\n confidence_loss = compute_confidence_loss(object_mask, filter_mask, raw_prediction, batch_size_float)\n class_loss = compute_class_loss(y_true, raw_prediction, object_mask, batch_size_float)\n\n loss = xy_loss + wh_loss + confidence_loss + class_loss\n\n print_data = [loss, xy_loss, wh_loss, confidence_loss, class_loss, K.sum(filter_mask)]\n\n return loss, print_data", "def loss(self, x, y):\n (N,D) = x.shape\n k1 = np.matmul(x,np.transpose(self.w)) + self.b\n y1 = y.reshape((N,1))\n c2 = 0\n c1 = (np.log(1+np.exp(-1*y1*k1)))\n for i in range(N):\n c2 += c1[i][0]\n l = c2 / N + (0.5 * self.l2_reg * np.dot(self.w,np.transpose(self.w)))\n l1 = l[0][0]\n return l1\n\n\n #raise NotImplementedError", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n # Set train/test mode for batchnorm params and dropout param since they\n # behave differently during training and testing.\n if self.use_dropout:\n self.dropout_param['mode'] = mode\n if self.normalization=='batchnorm':\n for bn_param in self.bn_params:\n bn_param['mode'] = mode\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the fully-connected net, computing #\n # the class scores for X and storing them in the scores variable. #\n # #\n # When using dropout, you'll need to pass self.dropout_param to each #\n # dropout forward pass. #\n # #\n # When using batch normalization, you'll need to pass self.bn_params[0] to #\n # the forward pass for the first batch normalization layer, pass #\n # self.bn_params[1] to the forward pass for the second batch normalization #\n # layer, etc. #\n ############################################################################\n if not self.use_dropout:\n if self.normalization is None: # {affine-relu} X (L-1) - affine - softmax\n cache, scores = self._AffRelu_Loss(X)\n elif self.normalization is \"batchnorm\":\n cache, scores = self._AffBatchRelu_Loss(X)\n elif self.normalization is \"layernorm\":\n cache, scores = self._AffLayerRelu_Loss(X)\n else:\n if self.normalization is None: # {affine-relu} X (L-1) - affine - softmax\n cache, scores = self._AffReluDrop_Loss(X)\n elif self.normalization is \"batchnorm\":\n cache, scores = self._AffBatchReluDrop_Loss(X)\n elif self.normalization is \"layernorm\":\n cache, scores = self._AffLayerReluDrop_Loss(X)\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n loss, grads = 0.0, {}\n \n ############################################################################\n # TODO: Implement the backward pass for the fully-connected net. Store the #\n # loss in the loss variable and gradients in the grads dictionary. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n # #\n # When using batch/layer normalization, you don't need to regularize the scale #\n # and shift parameters. #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. 
#\n ############################################################################\n \n loss, dscores = softmax_loss(scores, y)\n if not self.use_dropout:\n if self.normalization is None: # {affine-relu} X (L-1) - affine - softmax\n grads, l2_loss = self._AffRelu_Backprop(dscores, cache)\n loss += l2_loss\n elif self.normalization is \"batchnorm\":\n grads, l2_loss = self._AffBatchRelu_Backprop(dscores, cache)\n loss += l2_loss\n elif self.normalization is \"layernorm\":\n grads, l2_loss = self._AffLayerRelu_Backprop(dscores, cache)\n loss += l2_loss\n else:\n if self.normalization is None: # {affine-relu} X (L-1) - affine - softmax\n grads, l2_loss = self._AffReluDrop_Backprop(dscores, cache)\n loss += l2_loss\n elif self.normalization is \"batchnorm\":\n grads, l2_loss = self._AffBatchReluDrop_Backprop(dscores, cache)\n loss += l2_loss\n elif self.normalization is \"layernorm\":\n grads, l2_loss = self._AffLayerReluDrop_Backprop(dscores, cache)\n loss += l2_loss\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def _define_loss(self):\n\n cost = []\n unit_cost = []\n for nn in range(len(self.ffnet_out)):\n data_out = self.data_out_batch[nn]\n if self.filter_data:\n # this will zero out predictions where there is no data,\n # matching Robs here\n pred = tf.multiply(\n self.networks[self.ffnet_out[nn]].layers[-1].outputs,\n self.data_filter_batch[nn])\n else:\n pred = self.networks[self.ffnet_out[nn]].layers[-1].outputs\n\n nt = tf.cast(tf.shape(pred)[0], tf.float32)\n # define cost function\n if self.noise_dist == 'gaussian':\n with tf.name_scope('gaussian_loss'):\n cost.append(tf.nn.l2_loss(data_out - pred) / nt)\n unit_cost.append(tf.reduce_mean(tf.square(data_out-pred), axis=0))\n\n elif self.noise_dist == 'poisson':\n with tf.name_scope('poisson_loss'):\n\n if self.poisson_unit_norm is not None:\n # normalize based on rate * time (number of spikes)\n cost_norm = tf.multiply(self.poisson_unit_norm[nn], nt)\n else:\n cost_norm = nt\n\n cost.append(-tf.reduce_sum(tf.divide(\n tf.multiply(data_out, tf.log(self._log_min + pred)) - pred,\n cost_norm)))\n\n unit_cost.append(-tf.divide(\n tf.reduce_sum(\n tf.multiply(\n data_out, tf.log(self._log_min + pred)) - pred, axis=0),\n cost_norm))\n\n elif self.noise_dist == 'bernoulli':\n with tf.name_scope('bernoulli_loss'):\n # Check per-cell normalization with cross-entropy\n # cost_norm = tf.maximum(\n # tf.reduce_sum(data_out, axis=0), 1)\n cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred)))\n unit_cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred), axis=0))\n else:\n TypeError('Cost function not supported.')\n\n self.cost = tf.add_n(cost)\n self.unit_cost = unit_cost\n\n # Add regularization penalties\n reg_costs = []\n with tf.name_scope('regularization'):\n for nn in range(self.num_networks):\n reg_costs.append(self.networks[nn].define_regularization_loss())\n self.cost_reg = tf.add_n(reg_costs)\n\n self.cost_penalized = tf.add(self.cost, self.cost_reg)\n\n # save summary of cost\n # with tf.variable_scope('summaries'):\n tf.summary.scalar('cost', self.cost)\n tf.summary.scalar('cost_penalized', self.cost_penalized)\n tf.summary.scalar('reg_pen', self.cost_reg)", "def svm_loss(x, y):\n x = np.squeeze(x)\n N = x.shape[0]\n yt = y\n yt[y==0]=-1\n tmp = 1-yt*x\n mask = np.ones_like(tmp)\n 
mask[tmp<=0] = 0\n tmp = tmp*mask\n loss = np.sum(tmp)/N\n \n dx = -yt*mask/N\n # dx = np.reshape(dx,[dx.shape[0],1])\n return loss, dx", "def FL_loss(target, output, from_logits=False):\n\n # Note: tf.nn.sigmoid_cross_entropy_with_logits\n # expects logits, Keras expects probabilities.\n\n # if not from_logits:\n # # transform back to logits\n # _epsilon = _to_tensor(epsilon(), output.dtype.base_dtype)\n # output = tf.clip_by_value(output, _epsilon, 1 - _epsilon)\n # output = tf.log(output / (1 - output))\n\n loss_1 = tf.nn.sigmoid_cross_entropy_with_logits(labels=target[0], logits=output[0])\n loss_2 = tf.keras.losses.KLDivergence()(target[1], output[1])\n\n return loss_1 + loss_2", "def rpn_cls_loss(*args):\n y_true, y_pred = args if len(args) == 2 else args[0]\n indices = tf.where(tf.not_equal(y_true, -1))\n target = tf.gather_nd(y_true, indices)\n output = tf.gather_nd(y_pred, indices)\n lf = tf.losses.BinaryCrossentropy()\n return lf(target, output)", "def sigmoid_cross_entropy(y, label):\r\n losses = - np.log(y + g_epsilon) * label - np.log(1.0 - y + g_epsilon) * (1.0 - label)\r\n return losses", "def bp_mll_loss(y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.Tensor:\n\n # get true and false labels\n shape = tf.shape(y_true)\n y_i = tf.equal(y_true, tf.ones(shape))\n y_i_bar = tf.not_equal(y_true, tf.ones(shape))\n\n # get indices to check\n truth_matrix = tf.cast(pairwise_and(y_i, y_i_bar), dtype=tf.float32)\n\n # calculate all exp'd differences\n sub_matrix = pairwise_sub(y_pred, y_pred)\n exp_matrix = tf.exp(tf.negative(sub_matrix))\n\n # check which differences to consider and sum them\n sparse_matrix = tf.multiply(exp_matrix, truth_matrix)\n sums = tf.reduce_sum(sparse_matrix, axis=[1,2])\n\n # get normalizing terms and apply them\n y_i_sizes = tf.reduce_sum(tf.cast(y_i, dtype=tf.float32), axis=1)\n y_i_bar_sizes = tf.reduce_sum(tf.cast(y_i_bar, dtype=tf.float32), axis=1)\n normalizers = tf.multiply(y_i_sizes, y_i_bar_sizes)\n results = tf.divide(sums, normalizers)\n\n # average error\n return tf.reduce_mean(results)", "def loss_func(y, y_hat):\n # TODO: implement the function. 
\n # Consider these functions: `tf.square`, `tf.reduce_sum`\n\n loss = tf.reduce_sum(tf.square(y_hat - y))\n\n return loss", "def compute_loss(self, x, y):\n\n self.batch_size = x.shape[0]\n self.x = x\n self.y = y\n self.soft = self.softmax(x) + 10**(-11)\n out = np.zeros(self.batch_size)\n for i in range(self.batch_size):\n out[i] = -(y[i] @ np.log(self.soft[i]))\n\n return out", "def cross_entropy_loss(outputs, labels): \n# works properly\n \n m = labels.shape[0]\n p = outputs\n log_likelihood = -1*torch.log(p[range(m),labels])\n loss = torch.sum(log_likelihood) / m\n return loss.item()", "def svm_loss(x, y):\n\n x = x.reshape((-1,1))\n y = y.reshape((-1,1))\n N,_ = x.shape\n \n y_p = np.where(y == 1,1,-1)\n \n losses = np.maximum(0,1-(x*y_p))\n loss = np.sum(losses)/N\n dx = np.where(losses > 0, 1, 0)*(-y_p)/N\n dx = dx.reshape((-1,))\n\n return loss, dx", "def output_loss_and_grads(self, h, y, V=None, c=None):\n\n if V is None:\n V = self.V\n if c is None:\n c = self.c\n\n batch_size = h.shape[0]\n np.testing.assert_array_equal(h.shape, (batch_size, self.sequence_length, self.hidden_size))\n\n yhat = self.output(h, V=V, c=c)\n\n np.testing.assert_array_equal(yhat.shape, (batch_size, self.sequence_length, self.vocab_size))\n\n loss = log_loss(y.reshape(-1, self.vocab_size), yhat.reshape(-1, self.vocab_size)) * self.sequence_length # Since it computes average cross_entropy loss, not accounting for sequence_length\n do = yhat - y # (batch_size, sequence_length, vocab_size)\n assert do.shape == (batch_size, self.sequence_length, self.vocab_size)\n\n dV = np.zeros_like(V)\n dc = np.zeros_like(c)\n dh = []\n for ddo, hh in zip(do.transpose(1, 0, 2), h.transpose(1, 0, 2)):\n assert ddo.shape == (batch_size, self.vocab_size)\n assert hh.shape == (batch_size, self.hidden_size)\n dV += np.dot(hh.T, ddo) / batch_size\n dc += np.average(ddo, axis=0)\n dh.append(np.dot(ddo, V.T))\n dh = np.array(dh).transpose(1, 0, 2) # batch major\n assert dh.shape == h.shape\n assert dh.shape == (batch_size, self.sequence_length, self.hidden_size)\n\n self.dV = dV\n self.dc = dc\n\n return loss, dh", "def loss(self, predictions, labels, labels_2, inputs, raw_inp):\n next_word = labels\n curr_label = tf.cast(labels_2, tf.float32)\n\n \n prediction_word = predictions[0]\n prediction_label = predictions[1]\n\n #initialising variables\n cross_entropy_next = tf.constant(0)\n cross_entropy_label = tf.constant(0)\n cross_entropy_label_similarity = tf.constant(0)\n cross_entropy_emb = tf.constant(0)\n \n self.prec_label, self.prec_label_op = tf.constant(1), tf.constant(1)\n self.recall_label, self.recall_label_op = tf.constant(1), tf.constant(1)\n self.label_sigmoid = tf.constant(0)\n\n \n if self.config.solver._next_node_loss:\n #<EOS> and <UNK> get encoded as 1 and 0 respectively\n #Count loss only for actual nodes\n \n raw_inp1 = tf.greater(tf.slice(raw_inp, [0,0],[-1, 1]), -1) #Make first column all True\n raw_inp2 = tf.greater(tf.slice(raw_inp, [0,1],[-1, -1]), 1) #Make only non (<EOS>,<UNK>) True\n raw_inp = tf.concat(1, [raw_inp1, raw_inp2]) #concatenate back to original shape\n raw_inp = tf.transpose(raw_inp) #Transpose raw_inp from batch*step to step*batch\n mask = [tf.reshape(tf.cast(raw_inp, tf.float32), [-1])] #Convert from bool to float and flatten array\n\n\n #<EOS> and <UNK> get encoded as 1 and 0 respectively\n #Transpose raw_inp from batch*step to shape*batch\n #Count loss only for actual nodes\n #Convert from bool to float and flatten array\n #mask = 
[tf.reshape(tf.cast(tf.greater(tf.transpose(raw_inp), 0), tf.float32), [-1])]\n\n #Vector to weigh different word losses\n #all_ones = [tf.ones([self.config.batch_size * self.config.num_steps])]\n\n #cross entropy loss for next word prediction\n cross_entropy_next = sequence_loss([prediction_word],[tf.reshape(next_word, [-1])], mask, self.config.data_sets._len_vocab)\n tf.add_to_collection('total_loss', cross_entropy_next)\n\n if self.config.solver._curr_label_loss:\n #Get the slice of tensor representing label '0' for all batch.seq\n #'0' label is assigned for <EOS> and the nodes whose labels are not known\n #Valid errors are only those which don't have '0' label\n valid = tf.cast(tf.less(tf.slice(curr_label, [0,0,0], [self.config.num_steps, self.config.batch_size, 1]), tf.constant(0.5)), tf.float32)\n #replicate along 3rd axis\n valid = tf.tile(valid, tf.pack([1,1,tf.shape(curr_label)[2]]))\n \n #Sigmoid activation\n self.label_sigmoid = tf.sigmoid(prediction_label)\n #binary cross entropy for labels\n cross_loss = tf.add(tf.log(1e-10 + self.label_sigmoid)*curr_label,\n tf.log(1e-10 + (1-self.label_sigmoid))*(1-curr_label))\n #only consider the loss for valid label predictions\n #[TODO] mean of all or mean of only valid ???\n cross_entropy_label = -1*tf.reduce_mean(tf.reduce_sum(cross_loss*valid,2))\n tf.add_to_collection('total_loss', cross_entropy_label)\n\n\n if self.config.solver._label_similarity_loss: \n #Label similarity loss \n label_sigmoid = tf.sigmoid(pred_label_reshaped)\n part1 = tf.slice(label_sigmoid, [0,0,0], [self.config.num_steps-1, self.config.batch_size, self.config.data_sets._len_labels])\n part2 = tf.slice(label_sigmoid, [1,0,0], [self.config.num_steps-1, self.config.batch_size, self.config.data_sets._len_labels])\n\n #Exponential weightage -> [r**(n-1), r**(n-2), ... , r**2. r**1]\n label_diffusion = tf.constant([self.config.data_sets._diffusion_rate**i for i in range(self.config.num_steps-1,0,-1)])\n cross_loss_sim = tf.add(tf.log(1e-10 + part1)*part2, tf.log(1e-10 + (1-part1))*(1-part2))\n #prediction is 3 dimensional (seq x batch x label_len), reduce along axis of label_len\n #Sum over each label error -> take mean over the batch -> sum for the sequence\n cross_entropy_label_similarity = tf.reduce_sum(tf.reduce_mean(-tf.reduce_sum(cross_loss_sim, 2),1) * label_diffusion)\n tf.add_to_collection('total_loss', cross_entropy_label_similarity)\n\n \n if self.config.solver._embedding_loss:\n #embedding similarity loss\n #Matching First input's embeddings with embeddings of other inputs\n #[TODO] reverse feed of input AND reverse diffusion rate\n \n emb_part1 = tf.slice(inputs, [self.config.num_steps-2,0,0], [1, self.config.batch_size, self.config.mRNN._embed_size])\n emb_part2 = tf.slice(inputs, [0,0,0], [self.config.num_steps-1, self.config.batch_size, self.config.mRNN._embed_size])\n\n #Exponential weightage -> [r**(n-1), r**(n-2), ... , r**2. 
r**1]\n label_diffusion = tf.constant([self.config.data_sets._diffusion_rate**i for i in range(self.config.num_steps-1,0,-1)])\n #Broadcastive Subtraction\n mse_emb = tf.reduce_mean(tf.square(emb_part2 - emb_part1),2)\n cross_entropy_emb = tf.reduce_sum(tf.reduce_mean(mse_emb,1) * label_diffusion) * self.config.data_sets._emb_factor\n tf.add_to_collection('total_loss', cross_entropy_emb)\n\n if self.config.solver._L2loss:\n vars = tf.trainable_variables() \n lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in vars])*0.00001\n tf.add_to_collection('total_loss', lossL2)\n\n loss = tf.add_n(tf.get_collection('total_loss'))\n grads, = tf.gradients(loss, [self.embedding]) \n\n tf.summary.scalar('next_node_loss', cross_entropy_next)\n tf.summary.scalar('curr_label_loss', cross_entropy_label)\n tf.summary.scalar('label_similarity_loss', cross_entropy_label_similarity )\n tf.summary.scalar('emb_loss', cross_entropy_emb)\n tf.summary.scalar('total_loss', tf.reduce_sum(loss))\n \n return [loss, cross_entropy_next, cross_entropy_label, cross_entropy_label_similarity, cross_entropy_emb, grads]", "def loss_fn(outputs, labels):\n return nn.CrossEntropyLoss()(outputs, labels)", "def loss(self, X, y=None, lambda_reg=0.0):\n \n # Unpack variables from the params dictionary\n N, D = X.shape\n\n # Compute the forward pass\n #############################################################################\n # TODO: Perform the forward pass, computing the class scores for the input. #\n # Store the result in the scores variable, which should be an array of #\n # shape (N, C). #\n #############################################################################\n scores, cache_list = self.network_forward(X)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n \n #############################################################################\n # TODO: Compute for the loss. This should include L2 regularization for #\n # the weights of each layer. #\n #############################################################################\n loss_softmax, dloss_softmax = self.softmax_cross_entropy_loss(scores, y)\n loss = loss_softmax\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n \n #############################################################################\n # TODO: Compute the derivatives of the weights and biases. Store the #\n # results in the grads dictionary. For example, grads['W1'] should store #\n # the gradient on the weights W of the first layer, and be a matrix of #\n # same size. 
#\n #############################################################################\n grads = self.network_backward(dloss_softmax, cache_list)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, grads", "def nt_xent_loss(y_true, y_pred):\n [x,v] = tf.unstack(y_pred, num=2)\n x = tf.math.l2_normalize(x, -1)\n v = tf.math.l2_normalize(v, -1)\n\n batch_size = tf.shape(x)[0]\n masks = tf.one_hot(tf.range(batch_size), batch_size)\n labels = tf.one_hot(tf.range(batch_size), batch_size * 2)\n\n logits_x_x = tf.matmul(x, x, transpose_b=True) / 0.1\n logits_x_x = logits_x_x - masks * 1e9\n\n logits_v_v = tf.matmul(v, v, transpose_b=True) / 0.1\n logits_v_v = logits_v_v - masks * 1e9\n\n logits_x_v = tf.matmul(x, v, transpose_b=True) / 0.1\n logits_v_x = tf.matmul(v, x, transpose_b=True) / 0.1\n\n loss_x = tf.nn.softmax_cross_entropy_with_logits(\n labels, tf.concat([logits_x_v, logits_x_x], 1))\n loss_v = tf.nn.softmax_cross_entropy_with_logits(\n labels, tf.concat([logits_v_x, logits_v_v], 1))\n\n loss = tf.reduce_mean(loss_x + loss_v)\n\n return loss", "def loss(self, X, y):\n\n # Initialize the loss to zero.\n loss = 0.0\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n exp_a = np.zeros((num_classes,num_train))\n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the normalized softmax loss. Store it as the variable loss.\n # (That is, calculate the sum of the losses of all the training \n # set margins, and then normalize the loss by the number of \n # training examples.)\n # ================================================================ #\n \n \n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n\n #p[:,i] = exp_a[:,i]/np.sum(exp_a[:,i]) # p now is a valid probability matrix\n #print(p[:,i])\n\n loss += Loss \n #print(Loss,i) \n \n pass\n loss /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE question 3 ***\"\n return nn.SoftmaxLoss(self.run(x), y)", "def loss(self, prediction_dict, groundtruth_lists):\r\n pass", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. 
#\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_relu_pool_forward(x, w, b, conv_param, pool_param): return out, cache;\n out, cache['layer1'] = layer_utils.conv_relu_pool_forward(X, W1, b1, conv_param, pool_param) \n # def affine_relu_forward(x, w, b): return out, cache;\n out, cache['layer2'] = layer_utils.affine_relu_forward(out, W2, b2)\n # def affine_forward(x, w, b): return out, cache;\n scores, cache['layer3'] = layers.affine_forward(out, W3, b3)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW3, db3 = layers.affine_backward(dscores, cache['layer3']) \n # def affine_relu_backward(dout, cache): return dx, dw, db;\n dout, dW2, db2 = layer_utils.affine_relu_backward(dout, cache['layer2'])\n # def conv_relu_pool_backward(dout, cache): return dx, dw, db;\n dout, dW1, db1 = layer_utils.conv_relu_pool_backward(dout, cache['layer1'])\n\n # reg\n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def loss(self, X, y=None):\n\n # In dev testing, the loss fnc stops at \"scores\" , unfollowed by \"softmax\" probability prediction.\n # In real testing, \"self.predict()\" needs to be implemented in Solver() class.\n \n if y is None:\n for bn_param in self.bn_params:\n bn_param[\"mode\"] = \"test\"\n\n\n W1, b1 = self.params['W1'], self.params['b1']\n gamma1, beta1 = self.params[\"sbnGamma1\"], self.params[\"sbnBeta1\"]\n bn_param1 = self.bn_params[0]\n\n W2, b2 = self.params['W2'], self.params['b2']\n gamma2, beta2 = self.params[\"sbnGamma2\"], self.params[\"sbnBeta2\"]\n bn_param2 = self.bn_params[1]\n\n W3, b3 = self.params['W3'], self.params['b3']\n gamma3, beta3 = self.params[\"bnGamma3\"], self.params[\"bnBeta3\"]\n bn_param3 = self.bn_params[2]\n\n W4, b4 = self.params['W4'], self.params['b4']\n \n # pass conv_param to the forward pass for the convolutional layer\n conv_param = self.conv_param\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = self.maxpool_params\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. 
#\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_sbn_relu_forward(x, w, b, gamma, beta, conv_param, bn_param): return out, cache;\n out, cache[\"layer1\"] = layer_utils.conv_sbn_relu_forward(X, W1, b1, gamma1, beta1, conv_param, bn_param1) \n out, cache[\"layer2\"] = layer_utils.conv_sbn_relu_forward(out, W2, b2, gamma2, beta2, conv_param, bn_param2)\n\n # def max_pool_forward_fast(x, pool_param): return out, cache;\n out, cache[\"maxpool\"] = fast_layers.max_pool_forward_fast(out, pool_param)\n\n # def affine_bn_relu_forward(x, w, b, gamma, beta, bn_param): return out, cache;\n \n out, cache[\"layer3\"] = layer_utils.affine_bn_relu_forward(out, W3, b3, gamma3, beta3, bn_param3)\n\n # def affine_forward(x, w, b): return out, cache;\n scores, cache[\"layer4\"] = layers.affine_forward(out, W4, b4)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3) + np.sum(W4 * W4))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW4, db4 = layers.affine_backward(dscores, cache[\"layer4\"]) \n\n # def affine_bn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW3, db3, dgamma3, dbeta3 = layer_utils.affine_bn_relu_backward(dout, cache[\"layer3\"])\n\n # print cache[\"layer3\"]\n\n # def max_pool_backward_fast(dout, cache): return max_pool_backward_im2col(dout, real_cache);\n # def max_pool_backward_im2col(dout, cache): return dx;\n dout = fast_layers.max_pool_backward_fast(dout, cache[\"maxpool\"])\n\n # def conv_sbn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW2, db2, dgamma2, dbeta2 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer2\"])\n _, dW1, db1, dgamma1, dbeta1 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer1\"])\n\n # reg\n grads['W4'], grads['b4'] = dW4 + self.reg * W4, db4\n \n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads[\"bnGamma3\"], grads[\"bnBeta3\"] = dgamma3, dbeta3\n\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads[\"sbnGamma2\"], grads[\"sbnBeta2\"] = dgamma2, dbeta2\n\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n grads[\"sbnGamma1\"], grads[\"sbnBeta1\"] = dgamma1, dbeta1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads" ]
[ "0.67652905", "0.6620769", "0.66202563", "0.6577398", "0.6380125", "0.6372819", "0.63684165", "0.63215613", "0.6316717", "0.6303203", "0.6277401", "0.62723595", "0.6269626", "0.62407196", "0.62390345", "0.6234942", "0.62235326", "0.6202289", "0.61824965", "0.61645675", "0.61480033", "0.61440444", "0.6140885", "0.6122215", "0.6119902", "0.61021054", "0.6100519", "0.6098364", "0.6097332", "0.60887617", "0.6083709", "0.6080258", "0.6059631", "0.60584706", "0.6052046", "0.6038304", "0.6030844", "0.60306525", "0.6029866", "0.6020625", "0.6018725", "0.6002822", "0.6000279", "0.59985346", "0.59978205", "0.59930986", "0.5991803", "0.5987222", "0.5985985", "0.5982393", "0.5981475", "0.59801924", "0.5978124", "0.59742236", "0.5973672", "0.5973532", "0.5969416", "0.59674174", "0.59649295", "0.59633887", "0.59545875", "0.5950515", "0.5943982", "0.5942717", "0.5939011", "0.5934975", "0.59271276", "0.59226835", "0.5918029", "0.5903238", "0.59010786", "0.5900436", "0.59002024", "0.59000903", "0.5892778", "0.5891178", "0.5880867", "0.5878841", "0.5878403", "0.58725226", "0.58724886", "0.5868474", "0.5865139", "0.58631045", "0.5852927", "0.5852655", "0.58505726", "0.5850544", "0.5848779", "0.58459896", "0.5834366", "0.583284", "0.58236015", "0.58222103", "0.5812177", "0.58101225", "0.58070314", "0.58067477", "0.58034176", "0.5797306" ]
0.72924393
0
Predict label on this batch
def predict(self, model, batch):\n    device = list(model.parameters())[0].device\n    batch = batch.to(device)\n    inputs = batch.inputs\n    # Extract features with the model\n    h = model(*inputs)\n    # predictions\n    return self.predict_on_features(h)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict_label(self, src): # real signature unknown; restored from __doc__\n pass", "def predict(self, X, pred_batch_size=None):", "def predict(self, X): \n # Check is fit had been called\n check_is_fitted(self, ['X_', 'y_'])\n\n # Input validation\n X = check_array(X)\n\n j= 0\n predicted_labels = np.array([])\n while(j < X.shape[0]):\n current_batch_end = j+self.batch_size if j+self.batch_size < X.shape[0] else X.shape[0]\n current_batch = X[j:current_batch_end]\n self._feedforward(current_batch)\n predicted_labels = np.append(predicted_labels, np.take(self.map_labels, self.bmu_indices))\n j = current_batch_end\n \n return predicted_labels", "def predict_label(img, net_model, label):\n img1 = cv2.resize(img, (80, 80))\n predict = net_model.predict(img1.reshape(1, 80, 80, 3))\n maxi = predict[0][0]\n curs = 0\n test = 0\n for i, pred in enumerate(predict[0]):\n test += pred\n if pred > maxi:\n maxi = pred\n curs = i\n return label[curs]", "def predict(self):\n batch = get_predict_batch(1, num_rec_out=self.num_test_rec)\n self.g_model.test_batch(\n batch, self.global_step, num_rec_out=self.num_test_rec)", "def predict(self, X_test):\n\n # Predict Label 0\n i = 0\n X = X_test\n\n # Retrieve trained classifier for label 0\n clf = self.trained[i]\n\n # Make prediction\n y = clf.predict(X)\n result = np.zeros((X_test.shape[0], self.label_dim))\n\n result[:, i] = y\n\n # Concatenate result to X\n # X = sp.hstack([X, sp.csr_matrix(y)], format=\"csr\")\n\n # iterator = tqdm.trange(1, self.label_dim)\n iterator = range(1, self.label_dim)\n for i in iterator:\n # Predict Label i\n\n # Retrieve trained classifier for label i\n clf = self.trained[i]\n\n # Make prediction\n y = clf.predict(X)\n\n result[:, i] = y\n\n # Concatenate result to X\n # X = sp.hstack([X, sp.csr_matrix(y)], format=\"csr\")\n\n return result", "def label_predict(self, sentence):\n index_words = FileUtils.index_sentence(sentence, self.word_to_index)\n chunks = FileUtils.divide_sentence(index_words, Settings.seq_size)\n result = np.zeros(Settings.class_num)\n if Settings.cuda:\n self.model.cuda()\n \n for chunk in chunks:\n with torch.no_grad():\n chunk = torch.from_numpy(np.asarray(chunk)).view(1, Settings.seq_size)\n if Settings.cuda:\n chunk = chunk.cuda()\n \n predict = self.model(chunk)\n if Settings.cuda:\n predict = predict.cpu()\n predict = predict.numpy()[0]\n result += predict\n result /= len(chunks)\n\n target_index = np.argmax(result) + 1\n label = self.index_to_label.get(str(target_index))\n score = np.max(result)\n return label, score", "def predict(self, X):\n prob = self.predict_proba(X)\n if self.rule == 'fda':\n prob_1 = prob[:, :self.n_class_]\n prob_2 = prob[:, self.n_class_:]\n return np.vstack((self.labels_[prob_1.argmax(1)], self.labels_[prob_2.argmax(1)]))\n else:\n return self.labels_[prob.argmax(1)]", "def predict(model, X):\n\tmodel.eval()\n\t# make the predictions\n\tscores = model.forward(X)\n\n\t# scores contains, for each example, two scores that can be interpreted as the\n\t# probability of each example belonging to each of the classes. 
To select the\n\t# final predicted label, we will select the class with higher probability.\n\tpredicted_labels = scores.argmax(dim=-1) # predicted_labels shape: (n_examples)\n\n\treturn predicted_labels", "def fit_predict(self, X, y=None):\n self.fit(X)\n return self.labels_", "def fit_predict(self, X, y=None):\n self.fit(X)\n return self.labels_", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict_batch(self, model, context, data=None):\n pass", "def predict_label(self, x, weight=None, cutting=0.5, predict_label=None):\n if predict_label is None:\n predict_label = self.pred_label\n if weight is None: weight = self.weights[-1]\n pred = self.predict(x, weight, cutting)\n pred[np.where(pred == 0)] = predict_label[0]\n pred[np.where(pred == 1)] = predict_label[1]\n return pred", "def predict(self, X):\n pass", "def predict(self, X):\n pass", "def predict(self, X):\n pass", "def predict(self):\n raise NotImplementedError", "def predict(self):\n train_array = np.array(self.labels != 0, dtype=float)\n if not self.ising:\n labels_logit = self.ising_weights['vertices']\n else:\n neigh_num = self.adj.dot(train_array)\n neigh_num = np.where(neigh_num == 0, 1, neigh_num)\n neigh_weights = self.ising_weights['edges'] * self.labels\n labels_logit = (np.multiply(neigh_weights, neigh_num**(-1))\n + self.ising_weights['vertices'])\n self.prediction = np.where(labels_logit > 0, 1, -1)\n return self", "def predict(self, X):", "def predict(self, X):", "def target_predict(self, inp):\n return self.target_model.predict(inp)", "def predict_only(self):", "def predict(self, inputs):\n if self.use_logistic:\n return self.predict_labels_logistic(self.w, inputs)\n return predict_labels(self.w, inputs)", "def predict(self, testData=[]):\n result = []\n for classValue in self._classAttrs:\n #print(f'Computing Label: {classValue}, {self._classLabelMap[classValue]}')\n result.append(self._computeCondProb(testData, classValue))\n return self._classLabelMap[result.index(max(result))]", "def _predict(self, classify: np.array, n_preds=1):\r\n tmp = classify.argsort()[:, :n_preds] # Return the index of the best label classification\r\n preds = copy(tmp) # allow to copy tmp\r\n for index, target in enumerate(self.targets):\r\n preds = np.where(tmp == index, target, preds) # Return the target label corresponding to the index\r\n self.preds = preds", "def predict(self, X):\n\n y_pred = np.zeros(X.shape[0])\n y_pred = np.argmax(np.dot(X,self.W), axis=1)\n ###########################################################################\n # Implement this method. Store the predicted labels in y_pred. 
#\n ###########################################################################\n\n return y_pred", "def predict_labels(model, x_test):\n \n pred = model.predict(x_test)\n #pred_labels = model.predict_classes(x_test) # depricated\n pred_labels = np.argmax(model.predict(x_test), axis=-1)\n \n return pred, pred_labels", "def predict(self, images, batch_size):\n pass", "def predict(wav, labels, graph, input_name, output_name, how_many_labels):\n pred_lab, pred_prob=label_wav(wav, labels, graph, input_name, output_name, how_many_labels)\n return pred_lab, pred_prob", "def predict_step(self, *args: Any, **kwargs: Any) -> Tensor:\n batch = args[0]\n x = batch[\"image\"]\n y_hat: Tensor = self(x).softmax(dim=1)\n return y_hat", "def predict(self, predPoints=None):", "def predict(self):\n self.get_test_data()\n predicted_labels = []\n for row in self.test_data:\n predicted_labels.append(DecisionTree.predict_row(self.classifier, row))\n return predicted_labels", "def fit_predict(self):\n self.classifier = self.model\n self.classifier.fit(self.X_sample, self.y_sample)\n self.y_pred = self.classifier.predict(self.X_test)", "def predict(self, X, k=1):\n dists = self.compute_distances(X)\n return self.predict_labels(dists, k=k)", "def predict(self,X):\n y_pred = np.random.choice(self.labels, size=(X.shape[0],), p=self.thresholds)\n return y_pred", "def predict ( self, X ):\n \n return self.knn.predict ( X )\n # End predict()", "def predict(self, X):\n raise NotImplementedError", "def predict(self, model, context, data):\n pass", "def predict(self, X):\n # Check is fit had been called\n check_is_fitted(self)\n X = self._clean(X)\n y = self.model_.predict(X)\n return self.map_label_inverse_(y)", "def predict(self, X):\n num_test = X.shape[0]\n # lets make sure that the output type matches the input type\n Ypred = np.zeros(num_test, dtype = self.ytr.dtype)\n\n # loop over all test rows\n for i in range(num_test):\n # find the nearest training image to the i'th test image\n # using the L1 distance (sum of absolute value differences)\n #distances = np.sum(np.abs(self.Xtr - X[i,:]), axis = 1) # L1 distance\n distances = np.sqrt(np.sum(np.square(self.Xtr - X[i, :]), axis=1)) # L2 distance\n min_index = np.argmin(distances) # get the index with smallest distance\n Ypred[i] = self.ytr[min_index] # predict the label of the nearest example\n\n return Ypred", "def predict ( self, X: np.ndarray ):\n \n return self.knn.predict ( X )\n # End predict()", "def predict(self, example):\n label = \"\"\n pred = -99.0\n for w in self.weights:\n current = np.asarray(example.fvector)\n i = self.weights[w] @ current\n if i > pred:\n pred = i\n label = w\n return label", "def model_predict(img_path):\n img = open_image(img_path)\n pred_class, pred_idx, outputs = learn.predict(img)\n print(pred_class)\n return pred_class", "def predict(self, data_in):\n pass", "def predict(self, sess, data_gen):\n pred_labels = np.array([], dtype=np.intp)\n for inputs, seq_length in data_gen:\n feed_dict = self.create_feed_dict(inputs, seq_length,\n dropout=self.config.dropout)\n pred = sess.run(self.pred, feed_dict)\n pred = softmax(pred, -1)\n pred = binarize(pred[:, 1], self.threshold)\n\n pred_labels = np.concatenate((pred_labels, pred))\n\n return pred_labels.astype(np.int8)", "def predict(self, **kwargs):\n raise NotImplementedError", "def _predict_and_return_argmax_label(self, example):\n model_out = self._model.predict([example])\n softmax = list(model_out)[0]['preds']\n argmax = np.argmax(softmax)\n return 
self._model.output_spec()['preds'].vocab[argmax]", "def _predict(self, X):\n predictions = np.asarray([clf.predict(X) for clf in self.clfs_]).T\n predicted_labels = self.combiner.combine(predictions)\n return predicted_labels", "def predict_proba(self):\n ...", "def test_predict(self):\n\n classifier = BertCCAMClassifier()\n classifier.load_model(\"models\")\n prediction = classifier.predict([\"bartosz\"])\n self.assertEqual(prediction, [{\"labels\": (\"B\",)}])\n\n # with multiple labels\n prediction = classifier.predict([\"ala bert\"])\n self.assertEqual(prediction, [{\"labels\": (\"A\", \"B\")}])\n\n # in a batch\n prediction = classifier.predict([\"bartosz\", \"adam\"])\n self.assertEqual(prediction, [{\"labels\": (\"B\",)}, {\"labels\": (\"A\",)}])", "def predict_from(self, inputs, to_layers):", "def predict(test_dataset,test_tX,weights):\n for idx, dataset in enumerate(test_tX):\n test_dataset[idx]['Prediction'] = predict_labels(weights[idx],dataset)\n return test_dataset", "def predict(self, X):\n # yields labels of the given dataset X after calling predict_proba\n A=self.predict_proba(X)\n y_hat=np.argmax(A,axis=0)\n return y_hat", "def predict(self, features):\n vec = vectorize(features, self.vocab,\n self.dpvocab, self.projmat)\n label = self.clf.predict(vec)\n # print label\n return self.labelmap[label[0]]", "def predict(self, X):\n y_pred = np.zeros(X.shape[0])\n ###########################################################################\n # TODO: #\n # Implement this method. Store the predicted labels in y_pred. #\n ###########################################################################\n y_pred = np.argmax(np.dot(X, self.W), axis=1)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return y_pred", "def predict(self, X, y=None):\n # Check is fit had been called\n check_is_fitted(self, ['X_', 'y_'])\n\n _, Predicted_Labels =\\\n RankSVM_test(test_data=X,\n num_class=self.num_class,\n Weights=self.Weights,\n Bias=self.Bias,\n SVs=self.SVs,\n svm=self.svm, gamma=self.gamma,\n coefficient=self.coefficient,\n degree=self.degree)\n\n return Predicted_Labels", "def predict(self):\n self.predicted_test_summary = []\n for step in xrange(0, self.test_size // self.test_batch_size):\n print 'Predicting Batch No.:', step\n offset = (step * self.test_batch_size) % self.test_size\n batch_data_fwd = self.X_tst_fwd[offset:(offset + self.test_batch_size), :].T\n batch_data_bwd = self.X_tst_bwd[offset:(offset + self.test_batch_size), :].T\n summary_test_out = self._predict_batch(batch_data_fwd, batch_data_bwd)\n self.predicted_test_summary.extend(summary_test_out)\n\n print 'Prediction Complete. 
Moving Forward..'\n\n # test answers\n self.test_review = self.X_tst_fwd\n self.predicted_test_summary = self.predicted_test_summary\n self.true_summary = self.Y_tst", "def predict_on_batch(engine, batch):\n\t\tengine.model.eval()\n\t\tengine.model.rpn.nms_thresh = 0.3\n\t\twith torch.no_grad():\n\t\t\timgs, target = prepare_batch(batch, device=get_device(engine.model))\n\t\t\ty_pred = engine.model(imgs)\n\t\treturn y_pred, target", "def predict_category(self):\n pass", "def predict(self,X):\n return self.classifier.predict(X)", "def predict(self,X):\n return self.classifier.predict(X)", "def predict(self,X):\n return self.classifier.predict(X)", "def predict(self,X):\n return self.classifier.predict(X)", "def predict(self):\n prediction = np.multiply(self.alpha_vec, self.label[:,np.newaxis]).T\n pred = np.dot(prediction, np.inner(self.train_data, self.train_data)) + self.intercept\n self.prediction = np.sign(pred)\n return(self.prediction)", "def predict(self, data):\n xdata, _ = self.array_from_cases(data)\n preds = self.model.predict(xdata)\n label_preds = [dict(zip(self.binarizer.classes_, pred)) for pred in preds]\n return label_preds", "def clf1_predict(self):\n self._pred_clf_1 = self._clf1.predict(self._vectorized_input)[0]", "def predict(self, X):\n\n\t\tn_samples = X.shape[0]\n\t\tpredicted = np.zeros(n_samples)\n\n\t\tfor i in xrange(n_samples):\n\t\t\tpredicted[i] = self.classify_example(X[i])\n\n\t\treturn predicted", "def predict(self, x):\n return self.model.predict(x, batch_size=1, verbose=0)", "def predict(self, X_pred):\n \n with tf.Session() as sess:\n self.saver.restore(sess, self.log_dir + '/model')\n\n y_pred = sess.run(self.output_class, feed_dict={self.X_tf: X_pred, self.keep_prob: 1.0})\n return y_pred", "def predict(self, x):\n pred_labels = np.zeros((x.shape[0], 10))\n\n N = len(self.NET)\n for i in range(N):\n\n inputs = self.apply_dct_permutation(x.copy(), self.permutation[i])\n pred_labels += self.NET[i].model.predict(inputs)\n\n return pred_labels", "def predict(self, unknown):\n for title in unknown:\n for ind in range(len((unknown[list(unknown.keys())[0]]))):\n unknown[title][ind] = (unknown[title][ind] - self.normalization_n[ind]) / (self.normalization_d[ind])\n print(unknown)\n unknown_labels = {}\n for title in unknown:\n neighbors = self.k_neighbors(unknown[title], self.dataset, self.k)\n unknown_labels[title] = self.rate(neighbors, self.labels)\n return unknown_labels", "def predict(cls, input):\n clf = cls.get_model()\n print('input=')\n print(input)\n return clf.predict(input)", "def predict(self, data):\n\t\traise NotImplementedError", "def predict(self, obs):\n pass", "def predict(self, X):\r\n num_test = X.shape[0]\r\n # lets make sure that the output type matches the input type\r\n Ypred = np.zeros(num_test, dtype = self.ytr.dtype)\r\n\r\n # loop over all test rows\r\n for i in range(num_test):\r\n print (\"Testing example \" + str(i))\r\n distances = np.sum(np.abs(self.Xtr - X[i,:]), axis = 1)\r\n # distances = self.chi2_distance(self.Xtr, X[i,:])\r\n min_index = np.argmin(distances) # get the index with smallest distance\r\n Ypred[i] = self.ytr[min_index] # predict the label of the nearest example\r\n print (\"Class Label: \" + str(Yte[i]) + \" \" + \"Predicted label: \" + str(Ypred[i]))\r\n return Ypred", "def fit_predict(self, X, y=None, sample_weight=None):\r\n return self.fit(X, sample_weight=sample_weight).labels_", "def predict(self, X, batch_size=50):\n features = self.encode(X, batch_size=batch_size)\n return 
self.classifier.predict(features)", "def predict(self, data, label_converter=None, batch_size=32, max_seq_len=128):\n if label_converter:\n assert type(label_converter) == LabelConverter, \"@Param: 'label_converter' must be of Type: LabelConverter\"\n assert len(label_converter) == self.config.num_labels, (f\"@Param: 'label_coverter has length of {len(label_converter)}.\"\n f\"Must have same length as config.num_labels: {self.config.num_labels}\")\n\n predictions_dataloader = setup_dataloader(data, None, max_seq_len, batch_size)\n predictions = []\n for batch in tqdm(predictions_dataloader, desc=\"Iteration\"):\n with torch.no_grad():\n batch = {k: t.to(self.device) for k, t in batch.items()}\n outputs = self.model(**batch)\n logits = outputs[0]\n confidence_scores = get_confidence_scores(logits)\n pred_indices = confidence_scores.max(-1)[-1].tolist()\n preds = (confidence_scores.tolist(), pred_indices)\n if label_converter:\n preds = preds + (label_converter.convert_indices(pred_indices),)\n predictions += list(zip(*preds))\n batch = {k: t.detach().cpu() for k, t in batch.items()}\n del batch\n torch.cuda.empty_cache()\n return predictions", "def predict(self, model, x_test):\n pass", "def predict(self, X, a, b):\n pass", "def predict(self):\n self.kf.predict()\n self.nb_kf_pred += 1\n if self.time_since_update > 0:\n self.hit_streak = 0\n self.time_since_update += 1\n self.history.append(self.kf.x[:2].reshape(-1))\n return self.history[-1]", "def predict(self, conf):\n conf.set_int(\"angel.worker.matrix.transfer.request.timeout.ms\", 60000)\n predict(conf, conf._jvm.com.tencent.angel.ml.classification.lr.LRModel(conf._jconf, None), 'com.tencent.angel.ml.classification.lr.LRPredictTask')", "def predict(self): \n return self.model.predict(self.test_x)", "def predict(self, title):\n \n return self.knn_model.predict(self.target_lang_topics[title])", "def predict_data(img): \n return gennet.predict_data(img, 'Resnet50')", "def predict(self, X):\n return self.classifier.predict(X)", "def predict(self, X):\n\n ### TODO: YOUR CODE HERE\n D = shape(X)\n if self.isLeaf:\n return self.label\n else:\n if ( X[self.feature] <= 0.5 ):\n return self.left.predict(X)\n else:\n return self.right.predict(X)", "def fit_predict(self, X, y=None, sample_weight=None):\n self.fit(X, sample_weight=sample_weight)\n return self.labels_", "def predict(self, session, data, labels=None):\n\n losses,results = [],[]\n for step, (x,y) in enumerate(self.enhancer_iterator(data, labels, \n self.config.batch_size,\n self.config.num_steps)):\n if y is not None:\n cost, preds = session.run([self.cost, self.predictions],\n {self.input_data: x,\n self.targets: y,\n self.dropout: 1.0,\n self.initial_state: self.initial_state.eval()})\n losses.append(cost)\n else:\n preds = session.run(self.predictions,\n {self.input_data: x,\n self.dropout: 1.0,\n self.initial_state: self.initial_state.eval()})\n \n results.extend(np.argmax(preds,1))\n return np.exp(np.mean(losses)), results", "def predict_labels(self, x: list, decode=\"posterior\"):\n assert decode in self.decode, \"decode `{}` is not valid\".format(decode)\n \n if decode is 'posterior':\n return self.posterior_decode(x)\n \n if decode is 'viterbi':\n return self.viterbi_decode(x)", "def predict(cls, input):\n clf = cls.get_model()\n return clf.predict(input)", "def predict(self, seq):\n raise Exception(\"You cannot predict with a base predictor.\")", "def predict_word(word_path):\n word = word_path\n reshaped_word = shape_new_img(word)\n pred = model.predict(reshaped_word)\n 
get_class = np.argmax(pred)\n prediction = labels_list[get_class]\n return prediction", "def predict(self, X):\n\n num_test=X.shape[0]\n\n #make sure that the output type matches the input type\n Ypred = np.zeros(num_test, dtype=self.ytr.dtype)\n\n #Loop over all test rows\n for i in xrange(num_test): #https://www.geeksforgeeks.org/range-vs-xrange-python/\n #find the nearest training image to the ith test image\n #using the L1 distance (sum of absolute value differences)\n print \"Running test example %d\" % (i)\n #distances = np.sum(np.abs(self.Xtr - X[i,:]), axis = 1)\n distances = np.sum(np.square(self.Xtr - X[i,:]), axis=1)\n min_index = np.argmin(distances) #get the index with the smallest distance\n Ypred[i] = self.ytr[min_index] #predict the lable of the nearest example\n\n return Ypred", "def _predict_label(self, df_train, df_test, label=None):\n #train k-nearest neighbors classifier \n neigh = KNeighborsClassifier(n_neighbors=5)\n X, y = df_train[['longitude', 'latitude']], df_train[label]\n neigh.fit(X, y)\n #predict the label for wildfire incidents\n pred_label = neigh.predict(df_test[['longitude', 'latitude']])\n return pred_label", "def predict(self, review):\n raise NotImplementedError", "def predict(model, images):\n return model.predict_classes(images)", "def predict(self, observation):\n\t\t# TODO - complete this\n\t\tp_max = 0\n\t\tpredict = None\n\t\tfor label in self.possible_labels:\n\t\t\tpossiblity = 1\n\t\t\tlabel_gaussian = self.gaussians.get(label)\n\t\t\tfor i in range(len(observation)):\n\t\t\t\t(mean, std) = label_gaussian[0][i]\n\t\t\t\tvalue = observation[i]\n\t\t\t\tpossiblity *= self.gaussians_calc(value, mean, std)\n\t\t\tif p_max < possiblity:\n\t\t\t\tp_max = possiblity\n\t\t\t\tpredict = label\n\n\t\treturn predict" ]
[ "0.7966786", "0.78263235", "0.7803063", "0.7405601", "0.74039984", "0.7385424", "0.73802847", "0.73328096", "0.7332244", "0.73233455", "0.73233455", "0.73199046", "0.73199046", "0.73199046", "0.72741306", "0.72542346", "0.72518075", "0.72518075", "0.72518075", "0.72377795", "0.7224648", "0.7213648", "0.7213648", "0.71995866", "0.7166462", "0.7096082", "0.7079533", "0.7056952", "0.70556325", "0.7045076", "0.70426226", "0.70329607", "0.6983743", "0.69754916", "0.6958015", "0.6950234", "0.6940893", "0.6936124", "0.69342923", "0.69238424", "0.69150984", "0.69038033", "0.6897574", "0.6886041", "0.68793374", "0.6861207", "0.68499535", "0.68479055", "0.6840605", "0.6838512", "0.6837165", "0.6836781", "0.6831553", "0.6811802", "0.6808434", "0.6795044", "0.67932713", "0.67903906", "0.67830074", "0.67791635", "0.67777866", "0.67736584", "0.67717814", "0.67717814", "0.67717814", "0.67717814", "0.6770909", "0.6769332", "0.67662144", "0.67646587", "0.67578906", "0.67516947", "0.674589", "0.67413104", "0.6738077", "0.6732773", "0.67326945", "0.67293644", "0.67165923", "0.6712035", "0.6710911", "0.67107314", "0.671002", "0.67078197", "0.67013687", "0.6696006", "0.66942143", "0.6692752", "0.66920966", "0.66905373", "0.66788054", "0.6672382", "0.6669323", "0.66644025", "0.66623586", "0.66611236", "0.66573423", "0.66573095", "0.6654303", "0.664802", "0.6647651" ]
0.0
-1
Predict label given features from the model
def predict_on_features(self, h): logits = self.head(h.view(h.size(0), -1)) log_probs = F.log_softmax(logits, dim=-1) return log_probs, logits.argmax(dim=-1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(model, features):\n result = model.predict(features)\n return result", "def predict(self, features):\n vec = vectorize(features, self.vocab,\n self.dpvocab, self.projmat)\n label = self.clf.predict(vec)\n # print label\n return self.labelmap[label[0]]", "def predict(model, X):\n\tmodel.eval()\n\t# make the predictions\n\tscores = model.forward(X)\n\n\t# scores contains, for each example, two scores that can be interpreted as the\n\t# probability of each example belonging to each of the classes. To select the\n\t# final predicted label, we will select the class with higher probability.\n\tpredicted_labels = scores.argmax(dim=-1) # predicted_labels shape: (n_examples)\n\n\treturn predicted_labels", "def predict_labels(model, x_test):\n \n pred = model.predict(x_test)\n #pred_labels = model.predict_classes(x_test) # depricated\n pred_labels = np.argmax(model.predict(x_test), axis=-1)\n \n return pred, pred_labels", "def predict(self, features):\n feature_labels = []\n for f in features:\n get_label = self.get_k_neighbors(f)\n c0 = get_label.count(0)\n c1 = get_label.count(1)\n if c0 >= c1:\n f_label = 0\n else:\n f_label = 1\n feature_labels.append(f_label)\n return feature_labels\n raise NotImplementedError", "def predict(self, X, **kwargs):\n\n X = sanitize_dataframe(X)\n\n for c in set(self._features).difference(set(X.columns.values)):\n X = X.assign(**{c: 1})\n\n X[\"label_prediction\"] = self._base_model.predict(X)\n\n return self._model.predict(X[self._features], **kwargs)", "def predict(self, X): \n # Check is fit had been called\n check_is_fitted(self, ['X_', 'y_'])\n\n # Input validation\n X = check_array(X)\n\n j= 0\n predicted_labels = np.array([])\n while(j < X.shape[0]):\n current_batch_end = j+self.batch_size if j+self.batch_size < X.shape[0] else X.shape[0]\n current_batch = X[j:current_batch_end]\n self._feedforward(current_batch)\n predicted_labels = np.append(predicted_labels, np.take(self.map_labels, self.bmu_indices))\n j = current_batch_end\n \n return predicted_labels", "def predict_label(self, src): # real signature unknown; restored from __doc__\n pass", "def predict(self, X):", "def predict(self, X):", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(self, X):\n prob = self.predict_proba(X)\n if self.rule == 'fda':\n prob_1 = prob[:, :self.n_class_]\n prob_2 = prob[:, self.n_class_:]\n return np.vstack((self.labels_[prob_1.argmax(1)], self.labels_[prob_2.argmax(1)]))\n else:\n return self.labels_[prob.argmax(1)]", "def predict(self, features):\n features_scaled = self.scaler.transform(features.reshape(1, -1))\n return self.clf.predict(features_scaled)", "def predict(self, X):\n pass", "def predict(self, X):\n pass", "def predict(self, X):\n pass", "def predict(self, features):\n return self.search_results.predict(features)", "def predict(features):\n\n log.info(f\"Input: {features}\")\n\n if not is_valid_input(features):\n log.error()\n return \"Error\"\n\n features_processed = feature_processing(features)\n\n # apply model\n try:\n prediction = model.predict([features_processed])\n output = list(prediction)[0]\n except Exception as e:\n return \"Error:\" + str(e)\n\n if not is_valid_output(output):\n return\n\n return output", "def predict_features(self, df_features, df_target, idx=0, **kwargs):\n\n y = np.transpose(df_target.values)\n X = np.transpose(df_features.values)\n\n path, beta, A, lam = hsiclasso(X, y)\n\n return beta", "def predict(self, inputs):\n if self.use_logistic:\n return 
self.predict_labels_logistic(self.w, inputs)\n return predict_labels(self.w, inputs)", "def predict(self, X_test):\n\n # Predict Label 0\n i = 0\n X = X_test\n\n # Retrieve trained classifier for label 0\n clf = self.trained[i]\n\n # Make prediction\n y = clf.predict(X)\n result = np.zeros((X_test.shape[0], self.label_dim))\n\n result[:, i] = y\n\n # Concatenate result to X\n # X = sp.hstack([X, sp.csr_matrix(y)], format=\"csr\")\n\n # iterator = tqdm.trange(1, self.label_dim)\n iterator = range(1, self.label_dim)\n for i in iterator:\n # Predict Label i\n\n # Retrieve trained classifier for label i\n clf = self.trained[i]\n\n # Make prediction\n y = clf.predict(X)\n\n result[:, i] = y\n\n # Concatenate result to X\n # X = sp.hstack([X, sp.csr_matrix(y)], format=\"csr\")\n\n return result", "def predict(self, data):\n xdata, _ = self.array_from_cases(data)\n preds = self.model.predict(xdata)\n label_preds = [dict(zip(self.binarizer.classes_, pred)) for pred in preds]\n return label_preds", "def _predict(self, X):\n predictions = np.asarray([clf.predict(X) for clf in self.clfs_]).T\n predicted_labels = self.combiner.combine(predictions)\n return predicted_labels", "def predict(self, model, x_test):\n pass", "def fit_predict(self, X, y=None):\n self.fit(X)\n return self.labels_", "def fit_predict(self, X, y=None):\n self.fit(X)\n return self.labels_", "def predict_only(self):", "def predict(clf, features):\n return clf.predict(features).astype(np.int)", "def predict(self, X):\n raise NotImplementedError", "def predict(self, predPoints=None):", "def predict(self, features):\n out_l = []\n one_features = np.concatenate((np.ones(features.shape[0])[:, np.newaxis], features), axis=1)\n for example in one_features:\n this_pred = 1 if self.w.dot(example) >= 0 else -1\n out_l.append(this_pred)\n self.out = out_l\n return np.array(out_l)", "def predict(self, eval_features):\n\t\tinput_ids = torch.tensor(eval_features.input_ids, dtype=torch.long).to(self.device).unsqueeze(0)\n\t\tinput_mask = torch.tensor(eval_features.input_mask, dtype=torch.long).to(self.device).unsqueeze(0)\n\t\tsegment_ids = torch.tensor(eval_features.segment_ids, dtype=torch.long).to(self.device).unsqueeze(0)\n\t\t\n\t\twith torch.no_grad():\n\t\t\tlogits = self.model(input_ids, segment_ids, input_mask)\n\t\t\tlogits = logits.to(\"cpu\")\n\t\t\tsoftmax_logits = F.softmax(logits[0], dim=0).numpy()\n\t\t\tprint(\"softmax score : \", softmax_logits)\n# final_logits = list(zip(list(map(lambda x : self.reverse_label_map[np.ravel(np.where(softmax_logits==x))[0]], softmax_logits )), softmax_logits))\n\t\tpred = np.argmax(softmax_logits)\n\t\tprob = np.max(softmax_logits)\n\t\t\n\t\treturn pred , prob", "def predict_label(img, net_model, label):\n img1 = cv2.resize(img, (80, 80))\n predict = net_model.predict(img1.reshape(1, 80, 80, 3))\n maxi = predict[0][0]\n curs = 0\n test = 0\n for i, pred in enumerate(predict[0]):\n test += pred\n if pred > maxi:\n maxi = pred\n curs = i\n return label[curs]", "def predict(self, model, context, data):\n pass", "def predict_label(self, x, weight=None, cutting=0.5, predict_label=None):\n if predict_label is None:\n predict_label = self.pred_label\n if weight is None: weight = self.weights[-1]\n pred = self.predict(x, weight, cutting)\n pred[np.where(pred == 0)] = predict_label[0]\n pred[np.where(pred == 1)] = predict_label[1]\n return pred", "def predict():\n to_predict = np.zeros(5).reshape(1, 5)\n features = ['is_male', 'num_interactions_with_cust_service', 'late_on_payment', 'age', 
'years_in_contract']\n for i, feat in enumerate(features):\n if request.args.get(feat) is not None:\n to_predict[0][i] = request.args.get(feat)\n\n response = clf2.predict(to_predict)\n\n if response:\n return \"The customer is likely to churn\"\n else:\n return \"He is a loyal customer\"", "def predict_labels(clf, features, target):\n\n # Start the clock, make predictions, then stop the clock\n start = time()\n y_pred = clf.predict(features)\n end = time()\n # Print and return results\n print(\"Made predictions in {:.4f} seconds\".format(end - start))\n return accuracy_score(target, y_pred)", "def predict_from(self, inputs, to_layers):", "def predict(self):\n raise NotImplementedError", "def predict(self, X, pred_batch_size=None):", "def predict(self, x):\n pred_labels = np.zeros((x.shape[0], 10))\n\n N = len(self.NET)\n for i in range(N):\n\n inputs = self.apply_dct_permutation(x.copy(), self.permutation[i])\n pred_labels += self.NET[i].model.predict(inputs)\n\n return pred_labels", "def predict(self):\n self.get_test_data()\n predicted_labels = []\n for row in self.test_data:\n predicted_labels.append(DecisionTree.predict_row(self.classifier, row))\n return predicted_labels", "def predict(self):\n for column in self.data_to_predict.columns:\n if column not in list(self.selected_features_):\n self.data_to_predict.drop(column, axis=1, inplace=True)\n for column in list(self.selected_features_):\n if column not in self.data_to_predict.columns:\n self.data_to_predict.loc[:, column] = 0\n self.predictions = self.model.predict(\n self.data_to_predict[self.selected_features_])", "def predict(self, features):\n scores = self.predict_proba(features)\n return self.classes[np.argmax(scores)]", "def model_predict(classifier, X_test:list) -> list:\n y_predict = classifier.predict(X_test)\n return y_predict", "def predict(self, **kwargs):\n raise NotImplementedError", "def predict(self, xs, **kwargs):", "def predict(self, example):\n label = \"\"\n pred = -99.0\n for w in self.weights:\n current = np.asarray(example.fvector)\n i = self.weights[w] @ current\n if i > pred:\n pred = i\n label = w\n return label", "def predict(self, X, y=None):\n # Check is fit had been called\n check_is_fitted(self, ['X_', 'y_'])\n\n _, Predicted_Labels =\\\n RankSVM_test(test_data=X,\n num_class=self.num_class,\n Weights=self.Weights,\n Bias=self.Bias,\n SVs=self.SVs,\n svm=self.svm, gamma=self.gamma,\n coefficient=self.coefficient,\n degree=self.degree)\n\n return Predicted_Labels", "def predict():\n\n\n json_payload = request.json\n #LOG.info(f\"JSON payload: %s\" %json_payload)\n inference_payload = pd.DataFrame(json_payload)\n #LOG.info(\"inference payload DataFrame: %s\" %inference_payload)\n scaled_payload = scale(inference_payload)\n prediction = list(clf.predict(scaled_payload))\n return jsonify({'prediction': prediction})", "def predict(self, text):\n tokens = ['[CLS]'] + self.tokenizer.tokenize(text) + ['[SEP]']\n xx = self.tokenizer.convert_tokens_to_ids(tokens)\n xx = torch.tensor(xx).unsqueeze(0).to(self.device)\n _, y_hat = self.model(xx)\n pred_tags = []\n for tag in y_hat.squeeze():\n pred_tags.append(idx2tag[tag.item()])\n return pred_tags, tokens", "def predict(self, X, a, b):\n pass", "def predict_model():\n data = request.json\n\n if data:\n predictor.pred_dict[\"model\"] = data[\"model\"]\n else:\n pass\n\n return 'Non tam praeclarum est scire latine, quam turpe nescire'", "def predict(self, X):\n\n y_pred = np.zeros(X.shape[0])\n y_pred = np.argmax(np.dot(X,self.W), axis=1)\n 
###########################################################################\n # Implement this method. Store the predicted labels in y_pred. #\n ###########################################################################\n\n return y_pred", "def predict(self, X, k=1):\n dists = self.compute_distances(X)\n return self.predict_labels(dists, k=k)", "def predict(self, X):\n\n stuff = self._vectorizer.transform(X)\n result = self._classifier.predict(stuff)\n return result\n pass", "def predict(self,X):\n y_pred = np.random.choice(self.labels, size=(X.shape[0],), p=self.thresholds)\n return y_pred", "def predict(self, X):\n # Check is fit had been called\n check_is_fitted(self)\n X = self._clean(X)\n y = self.model_.predict(X)\n return self.map_label_inverse_(y)", "def fit_predict(self, X, y=None, sample_weight=None):\r\n return self.fit(X, sample_weight=sample_weight).labels_", "def predict(cls, input):\n clf = cls.get_model()\n print('input=')\n print(input)\n return clf.predict(input)", "def predict_random_forest(X_test, model):", "def predict_evidences(self, X):", "def predict(self, testFeatures): \r\n\r\n if(not self._fitCalled):\r\n print('The fit method has not been called yet')\r\n return None\r\n\r\n l,d = testFeatures.shape\r\n n,d = self.data.shape \r\n\r\n \"\"\" Fill and return this in your implementation. \"\"\"\r\n predictions = np.empty(shape=(l,), dtype=self.labels.dtype)\r\n\r\n \"\"\" Implement kNN prediction here. \"\"\"\r\n\r\n for i in range(0, l):\r\n distances = []\r\n for j in range(0, n):\r\n distances.append((np.sqrt(np.sum((testFeatures[i]-self.data[j])**2)), self.labels[j]))\r\n distances.sort()\r\n kNearestLabels = [x[1] for x in distances][0:self.k]\r\n most_common, num_most_common = Counter(kNearestLabels).most_common(1)[0]\r\n predictions[i] = most_common\r\n return predictions", "def predict(self,X):\n return self.classifier.predict(X)", "def predict(self,X):\n return self.classifier.predict(X)", "def predict(self,X):\n return self.classifier.predict(X)", "def predict(self,X):\n return self.classifier.predict(X)", "def predict(self, df):\n # TODO: REMOVE type column\n\n tokenizer = self.__create_tokenizer_from_hub_module()\n label_list = test_other[LABEL_COLUMN].unique().tolist()\n #label_list = [0, 1]\n test_features = self.__create_features(\n df, label_list,\n self.max_seq_len, tokenizer, 'text', 'type'\n )\n\n preds = []\n if type(self.model) == tf.estimator.Estimator:\n # Is trained\n input_fn = input_fn_builder(\n features=test_features,\n seq_length=self.max_seq_len,\n is_training=False,\n drop_remainder=False)\n pred = self.model.predict(input_fn=input_fn)\n for p in pred:\n preds.append(p)\n else:\n # Is loaded from a SavedModel\n # Format inputs\n inpu = {\n 'label_ids': np.array([x.label_id for x in test_features]).reshape(-1,),\n 'input_ids': np.array([x.input_ids for x in test_features]).reshape(-1, self.max_seq_len),\n 'input_mask': np.array([x.input_mask for x in test_features]).reshape(-1, self.max_seq_len),\n 'segment_ids': np.array([x.segment_ids for x in test_features]).reshape(-1, self.max_seq_len)\n }\n preds = self.model(inpu)\n\n return preds", "def predict(self, instances):\r\n raise NotImplementedError", "def make_predict(\n data: List,\n features: List[str]\n) -> List:\n global model\n data = pd.DataFrame([data], columns=features)\n predicts = model.predict(data)\n\n return predicts.tolist()", "def predict(model, images):\n return model.predict_classes(images)", "def predict(self, test_file_path: str) -> List[Dict[str, float]]:\n # TODO 
write code to extract features from test_file_path and \n # predict the labels for the model.\n pass", "def make_predictions(df):\n t_labels = get_labels(\"labels_pca\")\n # clean data\n df = clean_data(df)\n # engineer data\n df = engineer_features(df)\n # predict\n with open(\"model.pkl\",\"r\") as mdl:\n model = pickle.load(mdl)\n mdl.close()\n predictions = model.predict(df[t_labels])\n return predictions", "def predict(self, data_in):\n pass", "def predict(self, data: List):", "def predict(self, data):\n\t\traise NotImplementedError", "def predict_category(self):\n pass", "def PredictClerLabel(sentence, model_cler, word2vec):\n \n tokenized_sample = word_tokenize(re.sub(\"-\",\" \",sentence))\n features = np.mean([word2vec.word_vec(w) for w in tokenized_sample if w in word2vec],axis=0)\n prediction = model_cler.predict_proba(features.reshape(1,-1))[0]\n return model_cler.classes_[prediction.argmax()]", "def predict(test_dataset,test_tX,weights):\n for idx, dataset in enumerate(test_tX):\n test_dataset[idx]['Prediction'] = predict_labels(weights[idx],dataset)\n return test_dataset", "def predict(self, x):\n \n\n return predictions", "def predict(self, X, batch_size=50):\n features = self.encode(X, batch_size=batch_size)\n return self.classifier.predict(features)", "def predict_features(df_features, df_target, idx=0, **kwargs):\n nh = kwargs.get('nh', 20)\n x = th.FloatTensor(scale(df_features.as_matrix()))\n y = th.FloatTensor(scale(df_target.as_matrix()))\n model = FSGNN_model([x.size()[1], nh, 1], **kwargs)\n\n return model.train(x, y, **kwargs)", "def test_predict(self):\n\n classifier = BertCCAMClassifier()\n classifier.load_model(\"models\")\n prediction = classifier.predict([\"bartosz\"])\n self.assertEqual(prediction, [{\"labels\": (\"B\",)}])\n\n # with multiple labels\n prediction = classifier.predict([\"ala bert\"])\n self.assertEqual(prediction, [{\"labels\": (\"A\", \"B\")}])\n\n # in a batch\n prediction = classifier.predict([\"bartosz\", \"adam\"])\n self.assertEqual(prediction, [{\"labels\": (\"B\",)}, {\"labels\": (\"A\",)}])", "def fit(self, X_train, y_train, **kwargs):\n X_train[\"label_prediction\"] = self._base_model.predict(X_train)\n\n self._features = X_train.columns.values\n self._model.fit(X_train, y_train, **kwargs)", "def predict(self, testData=[]):\n result = []\n for classValue in self._classAttrs:\n #print(f'Computing Label: {classValue}, {self._classLabelMap[classValue]}')\n result.append(self._computeCondProb(testData, classValue))\n return self._classLabelMap[result.index(max(result))]", "def predict(x):\n model = Model()\n res = model.predict([x])[0][0]\n click.echo(res)", "def predict_label(examples_set):\n all_labels = list(('yes', 'no'))\n prediction = 'no'\n\n for label in all_labels:\n all_same_label = True\n for example in examples_set:\n if example[14] != label:\n all_same_label = False\n break\n if all_same_label:\n prediction = label\n break\n return prediction", "def predict(self, X):\n\n\t\tn_samples = X.shape[0]\n\t\tpredicted = np.zeros(n_samples)\n\n\t\tfor i in xrange(n_samples):\n\t\t\tpredicted[i] = self.classify_example(X[i])\n\n\t\treturn predicted", "def predict_class(self, feature):\n return self._clf.predict(feature)", "def predict_class(self, feature):\n return self._clf.predict(feature)", "def predict(model, X_testing):\n predictions = model.predict(X_testing)\n\n return predictions", "def predict(self, X, **kwargs):\n\n X = sanitize_dataframe(X)\n\n for c in set(self._features).difference(set(X.columns.values)):\n X = 
X.assign(**{c: 1})\n\n return self._model.predict(X[self._features], **kwargs)", "def model_predict(model,x_test,y_test):\n\n\n y_pred = model.predict(x_test)\n\n predict_class = np.argmax(y_pred, axis=1)\n\n predict_class = predict_class.tolist()\n\n return(y_pred,predict_class)", "def _predict(self, classify: np.array, n_preds=1):\r\n tmp = classify.argsort()[:, :n_preds] # Return the index of the best label classification\r\n preds = copy(tmp) # allow to copy tmp\r\n for index, target in enumerate(self.targets):\r\n preds = np.where(tmp == index, target, preds) # Return the target label corresponding to the index\r\n self.preds = preds", "def predict(self, X):\n # yields labels of the given dataset X after calling predict_proba\n A=self.predict_proba(X)\n y_hat=np.argmax(A,axis=0)\n return y_hat", "def predict(self, X):\n\n ### TODO: YOUR CODE HERE\n D = shape(X)\n if self.isLeaf:\n return self.label\n else:\n if ( X[self.feature] <= 0.5 ):\n return self.left.predict(X)\n else:\n return self.right.predict(X)", "def _predict_label(self, df_train, df_test, label=None):\n #train k-nearest neighbors classifier \n neigh = KNeighborsClassifier(n_neighbors=5)\n X, y = df_train[['longitude', 'latitude']], df_train[label]\n neigh.fit(X, y)\n #predict the label for wildfire incidents\n pred_label = neigh.predict(df_test[['longitude', 'latitude']])\n return pred_label", "def predict_tokens(self, tokens):\n return", "def predict(self):\n prediction = np.multiply(self.alpha_vec, self.label[:,np.newaxis]).T\n pred = np.dot(prediction, np.inner(self.train_data, self.train_data)) + self.intercept\n self.prediction = np.sign(pred)\n return(self.prediction)" ]
[ "0.7955748", "0.78675884", "0.7786666", "0.76372766", "0.7480364", "0.74767727", "0.74300313", "0.7427345", "0.73632777", "0.73632777", "0.7309799", "0.7309799", "0.7309799", "0.72233385", "0.7213071", "0.72049874", "0.72049874", "0.72049874", "0.72035515", "0.71662915", "0.7165358", "0.71545315", "0.71459466", "0.71056616", "0.71014", "0.70652664", "0.7046675", "0.7046675", "0.7027856", "0.7026039", "0.7016975", "0.6997081", "0.697498", "0.6971534", "0.6960522", "0.6953327", "0.69523734", "0.6901527", "0.68909675", "0.6889293", "0.68521625", "0.68504924", "0.684827", "0.68447906", "0.682339", "0.68196666", "0.6818692", "0.6810342", "0.6793462", "0.67801017", "0.67660403", "0.67617226", "0.67589897", "0.675063", "0.6748684", "0.6741175", "0.67328966", "0.6714891", "0.6712936", "0.67101", "0.6703861", "0.67030346", "0.6697617", "0.66972506", "0.66943604", "0.66880935", "0.66880935", "0.66880935", "0.66880935", "0.668624", "0.6677097", "0.6673433", "0.66713876", "0.6671376", "0.6664937", "0.6663894", "0.66596246", "0.66562134", "0.6654929", "0.664914", "0.6646189", "0.664227", "0.66370785", "0.6636289", "0.6635516", "0.6634195", "0.66279644", "0.66229224", "0.6620758", "0.6609881", "0.6605153", "0.6605153", "0.6593338", "0.6591126", "0.6588714", "0.6580774", "0.65739226", "0.6573908", "0.6571038", "0.65693855", "0.65692335" ]
0.0
-1
Score a collection of labels and predictions. Usually this is accuracy, but it depends on the task.
def score(self, y_hat, y): return (y_hat == y.to(y_hat.device)).float().mean().item()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def score(self, X_test: List[str], y_test: List[str]) -> int:\n predictions_count = 0\n right_predictions_count = 0\n\n for i in range(len(X_test)):\n label = self.predict(X_test[i].split())\n predictions_count += 1\n right_predictions_count += 1 if label == y_test[i] else 0\n\n return right_predictions_count / predictions_count", "def score(self, predicted_y, actual_y):\n\n self.labels = [0, 1, 2, 3]\n\n # Dictionary to store count of each label in predicted labels list\n self.total_prediction_count = {0: 0, 1: 0, 2: 0, 3: 0}\n\n # Dictionary to store count of each label in actual labels list\n self.total_actual_count = {0: 0, 1: 0, 2: 0, 3: 0}\n\n # Dictionary to store count of correctly predicted labels\n self.total_correct_prediction_count = {0: 0, 1: 0, 2: 0, 3: 0}\n\n for i in xrange(len(predicted_y)):\n # Extract predicted and actual labels for ith record\n predicted_label = predicted_y[i]\n actual_label = actual_y[i]\n\n # Increment the count of corrected predicted label if predicted and actual labels are same\n if predicted_label == actual_label:\n self.total_correct_prediction_count[actual_label] += 1\n\n # Increment total counts\n self.total_actual_count[actual_label] += 1\n self.total_prediction_count[predicted_label] += 1\n\n # Compute f1 scores of each label and return their mean\n return np.mean(self.__get_f1_scores__())", "def accuracy(preds, labels):\n correct = preds == labels\n return correct.sum().float() / correct.shape[0]", "def score(self, y_true, y_pred):\r\n pass", "def score(self, X, y):\n predictions = self.predict(X)\n total_values = len(y)\n accuracy = 0\n if 'classification' == self.label_type:\n correct_values = np.where(predictions == y)\n accuracy = correct_values[0].size / total_values\n elif 'regression' == self.label_type:\n sse = (y - predictions) ** 2\n sse_summed = np.sum(sse)\n accuracy = sse_summed / total_values\n\n return accuracy", "def accuracy(predictions, labels):\n predictions = list(predictions)\n labels = list(labels)\n count = 0\n for i in range(len(labels)):\n if labels[i] == predictions[i]:\n count += 1\n\n return count / len(labels)", "def score(self, phrases):\n pred = self.predict(phrases)\n return accuracy_score(preprocessor.getLabels(phrases), pred)", "def score(self, X, Y):\n # calls predict on X and predicts labels, and compares it with true labels to return accuracy\n y_hat=self.predict(X)\n Y=np.argmax(Y,axis=1)\n atrain_costuracy=(y_hat==Y).mean()\n return atrain_costuracy", "def evaluate_prediction(classifier, test_data, labels):\n \n predictions = classifier.predict(test_data)\n \n return accuracy_score(labels, predictions)", "def prediction():\r\n\r\n\r\n\tpredictVal = []\r\n\taccuracy = 0.0\r\n\r\n\t# Calculate accuracy for each class in testData\r\n\tfor item in testData:\r\n\t\tclass0Prediction = posProb / 100\r\n\t\tclass1Prediction = negProb / 100\r\n\t\t\r\n\t\t# Multiply the prior probablities for negative and positive reviews by their feature likelihoods \r\n\t\tfor word in item[2]:\r\n\t\t\tclass0Prediction *= class0Dict[word]\r\n\t\t\tclass1Prediction *= class1Dict[word]\r\n\r\n\t\t# Give every item in testData a predicted value\r\n\t\tif(class0Prediction > class1Prediction):\r\n\t\t\tpredictVal.append('0')\r\n\t\telse:\r\n\t\t\tpredictVal.append('1')\r\n\r\n\tfor i in range(len(testData)):\r\n\t\tif(testData[i][1] == predictVal[i]):\r\n\t\t\taccuracy += 1\r\n\r\n\t\t\t\r\n\taccuracy = 100 * (accuracy / len(testData))\r\n\treturn(predictVal, accuracy)", "def eval_metrics_for_multiclass(self, 
predicted_answers):\n total_correct_in_all = 0\n total_pred_in_all = len(predicted_answers)\n # initial a dict for total correct in topK counting.\n total_correct_in_topK = dict([(i, 0) for i in self.topK_list])\n total_pred_in_topK = dict([(i, 0) for i in self.topK_list])\n max_topK = max(self.topK_list)\n label_pred = []\n label_true = []\n label_weights = []\n digits = 3\n metrics = {}\n\n for e_id, sample in predicted_answers.iteritems():\n # get all correct ids\n correct_label_indices = sample['correct_labels']\n # current case, we only have a majority lable for the correct label\n label_true.append(correct_label_indices[0])\n # counting all correct for each sample\n total_correct_in_all += len(correct_label_indices)\n # select topK\n sorted_probs_max_topK = sorted(sample['pred_probs'], reverse=True, key=lambda x: x['prob'])[:max_topK]\n top1_pred = sorted_probs_max_topK[0]\n label_pred.append(top1_pred['label_index'])\n\n # for all topK predictions\n for i in range(len(sorted_probs_max_topK)):\n pred = sorted_probs_max_topK[i]\n for topK in self.topK_list:\n if i >= topK:\n continue\n else:\n total_pred_in_topK[topK] += 1\n if pred['label_index'] in correct_label_indices:\n total_correct_in_topK[topK] += 1\n\n if total_correct_in_all != 0:\n # recall@K\n recall_at_K = dict([(k, total_correct_in_topK[k] / (total_correct_in_all * 1.0)) for k in self.topK_list])\n # assign recall@K into metrics\n for k, v in recall_at_K.items():\n # Jie\n # 1 means the greater the better.\n # -1 means the smaller the better.\n metrics['R@{}'.format(k)] = (1, v)\n\n self.logger.info('total_correct_in_all = {}, correct_in_topK = {}, recall@K = {}'.format(total_correct_in_all, sorted(total_correct_in_topK.items()), sorted(recall_at_K.items())))\n # here return all the p,r,f for each label, then we compute the micro average later.\n p, r, f1, s = precision_recall_fscore_support(label_true, label_pred, beta=1.0, labels=range(self.num_classes), average=None)\n total_s = np.sum(s)\n p_micro, r_micro, f1_micro, _ = precision_recall_fscore_support(label_true, label_pred, beta=1.0, labels=range(self.num_classes), average='micro')\n last_lines_heading = ['macro / total', 'weighted_mac / total', 'micro / total']\n target_names = self.classes\n name_width = max(len(cn) for cn in target_names)\n width = max(name_width, max([len(x) for x in last_lines_heading]), digits)\n\n headers = [\"precision\", \"recall\", \"f1-score\", \"support\"]\n head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers)\n report = head_fmt.format(u'', *headers, width=width)\n report += u'\\n\\n'\n row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\\n'\n rows = zip(target_names, p, r, f1, s)\n for row in rows:\n label_weights.append(row[4])\n report += row_fmt.format(*row, width=width, digits=digits)\n metrics['P_{}'.format(row[0])] = (1, row[1])\n metrics['R_{}'.format(row[0])] = (1, row[2])\n metrics['F1_{}'.format(row[0])] = (1, row[3])\n report += u'\\n'\n\n # compute macro averages\n p_macro = np.average(p, weights = None)\n r_macro = np.average(r, weights = None)\n f1_macro = np.average(f1, weights = None)\n metrics['P_{}'.format(\"macro\")] = (1, p_macro)\n metrics['R_{}'.format(\"macro\")] = (1, r_macro)\n metrics['F1_{}'.format(\"macro\")] = (1, f1_macro)\n report += row_fmt.format(last_lines_heading[0],\n p_macro,\n r_macro,\n f1_macro,\n total_s,\n width=width, digits=digits)\n\n # compute weighted macro average\n label_weights = map(lambda x : x/(total_s * 1.0), label_weights)\n p_weighted_average = np.average(p, weights 
= label_weights)\n r_weighted_average = np.average(r, weights = label_weights)\n f1_weighted_average = np.average(f1, weights = label_weights)\n metrics['P_{}'.format(\"weighted_macro\")] = (1, p_weighted_average)\n metrics['R_{}'.format(\"weighted_macro\")] = (1, r_weighted_average)\n metrics['F1_{}'.format(\"weighted_macro\")] = (1, f1_weighted_average)\n report += row_fmt.format(last_lines_heading[1],\n p_weighted_average,\n r_weighted_average,\n f1_weighted_average,\n total_s,\n width=width, digits=digits)\n # micro average\n metrics['P_{}'.format(\"micro\")] = (1, p_micro)\n metrics['R_{}'.format(\"micro\")] = (1, r_micro)\n metrics['F1_{}'.format(\"micro\")] = (1, f1_micro)\n report += row_fmt.format(last_lines_heading[2],\n p_micro,\n r_micro,\n f1_micro,\n total_s,\n width=width, digits=digits)\n\n self.logger.info(\"P,R,F1 report as follows:\\n {}\".format(report))\n # only plot it at dev and test time, not during training.\n if self.gen_confusing_matrix:\n\n self.logger.info(\"Generate confusing matrix photo.\")\n # Compute confusion matrix\n conf_matrix = confusion_matrix(label_true, label_pred)\n np.set_printoptions(precision=2)\n\n # Plot non-normalized confusion matrix\n plt.figure()\n self.plot_confusion_matrix(conf_matrix, classes=self.brief_classes, ori_fmt='d',\n title='Confusion matrix, without normalization')\n wo_norm_fig_path = os.path.join(self.result_dir, '{}_wo_norm.png'.format(self.result_prefix))\n plt.savefig(wo_norm_fig_path)\n\n # Plot normalized confusion matrix\n plt.figure()\n self.plot_confusion_matrix(conf_matrix, classes=self.brief_classes, ori_fmt='d', normalize=True,\n title='Normalized confusion matrix')\n\n norm_fig_path = os.path.join(self.result_dir, '{}_w_norm.png'.format(self.result_prefix))\n plt.savefig(norm_fig_path)\n\n else:\n self.logger.warn('invalid total_correct_in_all')\n\n return metrics", "def accuracy(predictions, test_labels):\n return f1_score(test_labels, predictions, average='micro') * 100", "def score(self, predictions):\n return 0.", "def evaluate(labels, predictions):\n actual_positive = 0\n actual_negative = 0\n predicted_positive = 0\n predicted_negative = 0\n for i, j in zip(labels, predictions):\n if i == 1:\n actual_positive += i\n predicted_positive += j\n else:\n actual_negative += 1\n if j == 0:\n predicted_negative += 1\n return predicted_positive/actual_positive, predicted_negative/actual_negative", "def score_prediction(y_true, y_pred):\n return [round(metrics.accuracy_score(y_true, y_pred)*100, 2),\n round(metrics.precision_score(y_true, y_pred)*100, 2),\n round(metrics.recall_score(y_true, y_pred)*100, 2),\n round(metrics.f1_score(y_true, y_pred)*100, 2)]", "def accuracy(labels, predictions):\n if len(labels) != len(predictions):\n return -1\n\n correct = 0\n total = 0\n\n for i,v in enumerate(predictions):\n if labels[i] == str(v):\n correct += 1\n total += 1\n\n return (float(correct) / float(total)) * 100.0", "def evaluate(y_test, pred_labels):\n \n # Converts one-hot code to a label (the index of 1)\n y_test_labels = np.argmax(y_test, axis=1)\n \n # Compare test labels to predicted labels\n score = accuracy_score(y_test_labels, pred_labels)\n \n return y_test_labels, score", "def accuracy(targets: List[List[float]], predict: List[List[float]]):\r\n correct = 0\r\n for i in range(len(targets)):\r\n if predict[i] == targets[i]:\r\n correct += 1\r\n return correct / len(targets) * 100", "def evaluate(labels, predictions):\n pos = 0\n neg = 0\n true_pos_rate = 0\n true_neg_rate = 0\n\n for i in 
range(len(labels)):\n if labels[i] == 1:\n pos += 1\n else:\n neg += 1\n if predictions[i] == labels[i]:\n if predictions[i] == 1:\n true_pos_rate += 1\n else:\n true_neg_rate += 1\n \n sensitivity = true_pos_rate / pos\n specificity = true_neg_rate / neg\n\n return (sensitivity, specificity)", "def evaluate(labels, predictions):\n correct_positive = 0\n correct_negative = 0\n total_positive = 0\n total_negative = 0\n\n for i in range(len(labels)):\n if labels[i] == 1:\n total_positive += 1\n if predictions[i] == 1:\n correct_positive += 1\n else:\n total_negative += 1\n if predictions[i] == 0:\n correct_negative += 1\n\n sensitivity = correct_positive / total_positive\n specificity = correct_negative / total_negative\n\n return sensitivity, specificity", "def calc_metrics(pred, labels):\n pred_flat = np.argmax(pred, axis = 1).flatten()\n labels_flat = labels.flatten()\n \n flat_accuracy = np.sum(pred_flat == labels_flat) / len(labels_flat)\n \n # sklearn takes first parameter as the true label\n precision = precision_score(labels_flat, pred_flat)\n recall = recall_score(labels_flat, pred_flat)\n \n return flat_accuracy, precision, recall", "def score(self, test_data):\n\n\t\tins, outs = self._split_inputs_outputs(test_data)\n\n\t\t# One hot encode the input/labels\n\t\tencoder = LabelEncoder()\n\t\tencoder.fit(outs)\n\t\tenc_labels = encoder.transform(outs)\n\t\tenc_labels = np_utils.to_categorical(enc_labels)\n\n\t\t_, score = self.model.evaluate(ins, enc_labels, verbose=2)\n\n\t\treturn score", "def evaluate(labels, predictions):\n\n truePositiveCounter = 0\n trueNegativeCounter = 0\n truePositiveCorrect = 0\n trueNegativeCorrect = 0\n \n sensitivity = 0\n specificity = 0\n\n for i in range(len(labels)):\n if labels[i] == 1:\n truePositiveCounter += 1\n if(labels[i] == predictions[i]):\n truePositiveCorrect += 1\n elif labels[i] == 0:\n trueNegativeCounter += 1\n if(labels[i] == predictions[i]):\n trueNegativeCorrect += 1\n\n sensitivity = truePositiveCorrect / truePositiveCounter\n specificity = trueNegativeCorrect / trueNegativeCounter\n\n return sensitivity, specificity", "def evaluate(predicted, labels):\n \n assert len(predicted) == len(labels), \"Different number of predictions and labels.\"\n \n total = len(predicted)\n movie_correct = 0\n location_correct = 0\n \n center_frame_dist = [] \n overlaps = []\n \n for pred, label in zip(predicted, labels):\n \n dist = 0\n \n if pred[0] == label[0]: # Check if movie is correct\n movie_correct += 1\n \n dist = abs(pred[1] - ((label[1]+label[2])/2)) \n center_frame_dist.append(dist)\n \n correct = False\n if label[1] <= pred[1] <= label[2]:\n correct = True\n location_correct += 1\n\n \n# print(\"Label: ({:s}, {:d}, {:d}), predicted: ({:s}, {:d}), location correct: {!s:}, start_frame_dist: {:d}, overlap: {:d}\".format(\n# *label,\n# *pred,\n# correct,\n# dist\n# ))\n \n # Return (# movies correct, # correct location, # total movies) and (avg start frame distance, std)\n return (movie_correct, location_correct, total), (np.mean(center_frame_dist), np.std(center_frame_dist))", "def score_one(self, test_data):\n\n\t\ttest_in, test_labels = self._split_inputs_outputs(test_data)\n\t\tcorrect = 0\n\t\ttotal = 0\n\n\t\tfor i, test_input in enumerate(test_in):\n\t\t\tprediction = self.model.predict(test_input.reshape(1,-1))\n\t\t\tif prediction[0] == test_labels[i]:\n\t\t\t\tcorrect+=1\n\t\t\ttotal+=1\n\t\treturn float(correct)/total", "def score_one(self, test_data):\n\n\t\ttest_in, test_labels = 
self._split_inputs_outputs(test_data)\n\t\tcorrect = 0\n\t\ttotal = 0\n\n\t\tfor i, test_input in enumerate(test_in):\n\t\t\tprediction = self.model.predict(test_input.reshape(1,-1))\n\t\t\tif prediction[0] == test_labels[i]:\n\t\t\t\tcorrect+=1\n\t\t\ttotal+=1\n\t\treturn float(correct)/total", "def evaluate(labels, predictions):\n TP = 0\n actualP = 0\n TN = 0\n actualN = 0\n for label, prediction in zip(labels, predictions):\n if label ==1:\n actualP +=1\n if prediction == 1:\n TP +=1\n else:\n actualN +=1\n if prediction ==0:\n TN +=1\n \n sensitivity = float(TP/actualP)\n specificity = float(TN/actualN)\n return (sensitivity, specificity)", "def accuracy(predictions, labels):\n return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0])", "def accuracy(labels, preds):\n\tassert labels.shape[0]==preds.shape[0]\n\treturn np.sum(preds==labels)/float(labels.shape[0])", "def evaluate(labels, predictions):\n # create 4 variables to represent sensitivity,specificity,total_positive values & total_negative values.\n sensitivity = float(0)\n specificity = float(0)\n\n total_positive = float(0)\n total_negative = float(0)\n\n # run through a for loop to evaluate the sensitivity and specificity of a data set\n for label, prediction in zip(labels, predictions):\n\n if label == 1:\n total_positive += 1\n if prediction == label:\n sensitivity += 1\n\n if label == 0:\n total_negative += 1\n if prediction == label:\n specificity += 1\n\n # data normalization\n sensitivity /= total_positive\n specificity /= total_negative\n\n return sensitivity, specificity", "def evaluate(labels, predictions):\n\n true_positives = 0\n label_positives = 0\n\n true_negatives = 0\n label_negatives = 0\n\n for i in range(len(predictions)):\n if labels[i] == predictions[i] == 1:\n true_positives += 1\n if labels[i] == 1:\n label_positives += 1\n\n if labels[i] == predictions[i] == 0:\n true_negatives += 1\n if labels[i] == 0:\n label_negatives += 1\n\n return true_positives / label_positives, true_negatives / label_negatives\n\n # raise NotImplementedError", "def label_accuracy_score(label_trues, label_preds, n_class):\n hist = np.zeros((n_class, n_class))\n # 一个batch里面可能有多个数据\n # 通过迭代器将一个个数据进行计算\n for lt, lp in zip(label_trues, label_preds):\n # numpy.ndarray.flatten将numpy对象拉成1维\n hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)\n\n # np.diag(a)假如a是一个二维矩阵,那么会输出矩阵的对角线元素\n # np.sum()可以计算出所有元素的和。如果axis=1,则表示按行相加\n \"\"\"\n acc是准确率 = 预测正确的像素点个数/总的像素点个数\n acc_cls是预测的每一类别的准确率(比如第0行是预测的类别为0的准确率),然后求平均\n iu是召回率Recall,公式上面给出了\n mean_iu就是对iu求了一个平均\n freq是每一类被预测到的频率\n fwavacc是频率乘以召回率,我也不知道这个指标代表什么\n \"\"\"\n acc = np.diag(hist).sum() / hist.sum()\n acc_cls = np.diag(hist) / hist.sum(axis=1)\n # nanmean会自动忽略nan的元素求平均\n acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n mean_iu = np.nanmean(iu)\n freq = hist.sum(axis=1) / hist.sum()\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n return acc, acc_cls, mean_iu, fwavacc", "def accuracy(cls, test_labels):\n N = len(test_labels)\n\n # Calculate total correct as precentage\n total_correct = 100*(N - np.count_nonzero(cls - test_labels))/N\n\n # Calculate precentag correct for each class\n lab = np.unique(test_labels)\n cls_correct = {}\n for label in lab:\n idx = np.where(test_labels == label)[0]\n N_cls = len(idx)\n cls_correct[label] = 100*(N_cls - np.count_nonzero(label -\n cls[idx]))/N_cls\n\n print(\"Accuracy for:\")\n print(\"All classes is %.2f%%\" % total_correct)\n for label in 
lab:\n print(\"Class %d is %.2f%%\" % (label, cls_correct[label]))\n return(total_correct, cls_correct)", "def evaluate(labels, predictions):\n #labels and predictions\n truePos = 0\n trueNeg = 0\n for data in range(len(labels)):\n if((predictions[data] == 1) and (predictions[data] == labels[data])):\n truePos+=1\n elif((predictions[data] == 0) and (predictions[data] == labels[data])):\n trueNeg+=1\n sensitivity = truePos/(len(labels) + 1)\n specificity = trueNeg/(len(labels) + 1)\n return (sensitivity, specificity)\n \n\n #raise NotImplementedError", "def _eval_classifier(self):\n\n y_pred_baseline = self.df_baseline[self.score_column]\n y_pred_sample = self.df_sample[self.score_column]\n\n y_label_baseline = self.df_baseline[self.label_column]\n y_label_sample = self.df_sample[self.label_column]\n\n precision_baseline = precision_score(y_label_baseline, y_pred_baseline)\n recall_baseline = recall_score(y_label_baseline, y_pred_baseline)\n acc_baseline = accuracy_score(y_label_baseline, y_pred_baseline)\n f1_baseline = f1_score(y_label_baseline, y_pred_baseline)\n try:\n auc_baseline = roc_auc_score(y_label_baseline, y_pred_baseline)\n except ValueError:\n auc_baseline = \"NA\"\n\n precision_sample = precision_score(y_label_sample, y_pred_sample)\n recall_sample = recall_score(y_label_sample, y_pred_sample)\n acc_sample = accuracy_score(y_label_sample, y_pred_sample)\n f1_sample = f1_score(y_label_sample, y_pred_sample)\n try:\n auc_sample = roc_auc_score(y_label_sample, y_pred_sample)\n except ValueError:\n auc_sample = \"NA\"\n\n metrics_df = pd.DataFrame(\n {\n \"Accuracy\": [acc_baseline, acc_sample],\n \"Precision\": [precision_baseline, precision_sample],\n \"Recall\": [recall_baseline, recall_sample],\n \"F1\": [f1_baseline, f1_sample],\n \"AUC\": [auc_baseline, auc_sample],\n },\n index=[\"baseline\", \"sample\"],\n )\n\n self.performance_comparison = metrics_df", "def accuracy_score(preds, y):\n accuracy = sum([1 for i in range (len(preds)) if preds[i] == y[i]])*1.0/len(preds) \n return accuracy", "def metrics(self, predictions, gts, label_list):\n prediction_labels = np.concatenate([predictions.flatten()])\n gt_labels = np.concatenate([gts.flatten()])\n\n cm = metrics.confusion_matrix(\n gt_labels,\n prediction_labels,\n range(len(label_list)))\n\n # print(\"Confusion matrix :\")\n # print(cm)\n # print(\"---\")\n # Compute global accuracy\n accuracy = sum([cm[x][x] for x in range(len(cm))])\n total = sum(sum(cm))\n oa = accuracy * 100 / float(total)\n # print(\"{} pixels processed\".format(total))\n # print(\"Total accuracy : {}%\".format(accuracy * 100 / float(total)))\n # print(\"---\")\n # Compute kappa coefficient\n total = np.sum(cm)\n pa = np.trace(cm) / float(total)\n pe = np.sum(np.sum(cm, axis=0) * np.sum(cm, axis=1)) / float(total * total)\n kappa = (pa - pe) / (1 - pe)\n # print(\"Kappa: \" + str(kappa))\n return kappa, oa", "def score(self):\n\n\t\tsplits = 10\n\t\tscore = 0\n\n\t\tkf = KFold(n_splits=splits, shuffle=True)\n\t\tkf.get_n_splits(self.data)\n\n\t\tfor train_ind, test_ind in kf.split(self.data):\n\n\t\t\ttrain = [self.data[ind] for ind in train_ind]\n\t\t\ttest = [self.data[ind] for ind in test_ind]\n\n\t\t\tself.model = self._fit(train)\n\t\t\ttemp_score = self.score_one(test)\n\t\t\tscore += temp_score\n\n\t\treturn score/float(splits)", "def score(self):\n\n\t\tsplits = 10\n\t\tscore = 0\n\n\t\tkf = KFold(n_splits=splits, shuffle=True)\n\t\tkf.get_n_splits(self.data)\n\n\t\tfor train_ind, test_ind in kf.split(self.data):\n\n\t\t\ttrain = 
[self.data[ind] for ind in train_ind]\n\t\t\ttest = [self.data[ind] for ind in test_ind]\n\n\t\t\tself.model = self._fit(train)\n\t\t\ttemp_score = self.score_one(test)\n\t\t\tscore += temp_score\n\n\t\treturn score/float(splits)", "def evaluate(labels, predictions):\n\n # Positive and positive identified count\n pos = 0\n posid = 0\n\n # Negative and positive identified count\n neg = 0\n negid = 0\n\n for label, pred in zip(labels, predictions):\n if label == 1:\n pos += 1\n if pred == 1:\n posid += 1\n elif label == 0:\n neg += 1\n if pred == 0:\n negid += 1\n else:\n raise ValueError\n\n # `sensitivity` should be a floating-point value from 0 to 1\n # representing the \"true positive rate\": the proportion of\n # actual positive labels that were accurately identified.\n sens = float(posid / pos)\n\n # `specificity` should be a floating-point value from 0 to 1\n # representing the \"true negative rate\": the proportion of\n # actual negative labels that were accurately identified.\n spec = float(negid / neg)\n\n return (sens, spec)", "def accuracy(labels, predictions, n_classes):\n\t\tequality = tf.equal(x = predictions, y = labels) # match the type of labels\n\t\treturn tf.reduce_mean(tf.cast(equality, tf.float32))", "def evaluate(prediction_folder, label_folder, verbose=False):\n prediction_tasks = next(os.walk(prediction_folder))[1]\n label_tasks = next(os.walk(label_folder))[1]\n # prediction_tasks = label_tasks = ['mlqa', 'tydiqa', 'xquad']\n\n detailed_scores = {}\n for task, langs in TASK2LANGS.items():\n if task in prediction_tasks and task in label_tasks:\n suffix = \"json\" if task in GROUP2TASK[\"qa\"] else \"tsv\"\n # collect scores over all languages\n score = defaultdict(dict)\n for lg in langs:\n prediction_file = os.path.join(prediction_folder, task, f\"test-{lg}.{suffix}\")\n label_file = os.path.join(label_folder, task, f\"test-{lg}.{suffix}\")\n score_lg = evaluate_one_task(prediction_file, label_file, task, language=lg)\n for metric in score_lg:\n score[metric][lg] = score_lg[metric]\n # average over all languages\n avg_score = {}\n for m in score:\n avg_score[f'avg_{m}'] = sum(score[m].values()) / len(score[m])\n score.update(avg_score)\n if task in GROUP2TASK[\"qa\"]:\n score['avg_metric'] = (score['avg_exact_match'] + score['avg_f1']) / 2\n elif 'avg_f1' in score:\n score['avg_metric'] = score['avg_f1']\n elif 'avg_accuracy' in score:\n score['avg_metric'] = score['avg_accuracy']\n detailed_scores[task] = score\n if verbose:\n avg_result = ', '.join(['{}={:.1f}'.format(k, v) for k, v in score.items() if k.startswith('avg')])\n print('- Evaluate {}:\\t{}'.format(task, avg_result))\n\n # Display logic:\n overall_scores = {}\n all_tasks = set(TASK2LANGS.keys())\n available_tasks = set(detailed_scores.keys())\n\n # If scores of all tasks are available, show the overall score in the main table\n if all_tasks == available_tasks:\n overall_scores['all_task'] = sum(detailed_scores[task]['avg_metric'] for task in all_tasks) / len(all_tasks)\n\n # If scores of all tasks in a sub group are available, show the score in the sub table\n for group, group_tasks in GROUP2TASK.items():\n if len(set(group_tasks) - available_tasks) == 0:\n overall_scores[group] = sum(detailed_scores[task]['avg_metric'] for task in group_tasks) / len(group_tasks)\n\n return overall_scores, detailed_scores", "def compute_accuracy(labels=None, predictions=None):\n labels = np.array(labels, dtype=np.int32)\n if len(labels.shape) == 2:\n labels = np.argmax(labels, -1)\n return np.sum(np.equal(labels, 
predictions)) / np.size(labels)", "def accuracy(outputs, labels):\n predicted = outputs.argmax(dim=1)\n correct = (predicted == labels).sum().item()\n return correct / labels.size(0)", "def prediction_processing(predictions, labels, threshold, step_nb):\n new_labels = []\n new_predictions = []\n number_sequences = step_nb//50\n\n for k in range(len(labels)//number_sequences):\n total_prediction = 0\n isLabelTrue = labels[number_sequences*k]\n for i in range(number_sequences):\n total_prediction += (1/predictions[number_sequences*k+i])\n if not(isLabelTrue == (labels[number_sequences*k+i])):\n logger.error('Problem.')\n if total_prediction > threshold:\n total_prediction = False\n else:\n total_prediction = True\n new_labels.append(isLabelTrue)\n new_predictions.append(total_prediction)\n\n recall_1 = recall_score(new_labels, new_predictions)\n recall_0 = recall_score(new_labels, new_predictions, pos_label=0)\n precision_1 = precision_score(new_labels, new_predictions)\n precision_0 = precision_score(new_labels, new_predictions, pos_label=0)\n return((recall_1, recall_0, precision_1, precision_0), new_predictions, new_labels)", "def score(self, X_test, y_test):\n correct = []\n for one in X_test:\n correct.append(self.predict(one))\n try:\n return sum(0 if correct[i] != y_test[i] else 1 for i in range(len(X_test))) / len(\n X_test\n )\n except ZeroDivisionError:\n pass", "def computeAccuracy(self, targetLabels, actualLabels):\r\n self.accuracy = (0.0 + sum([1 for x in map(lambda y,z:(y,z), targetLabels, actualLabels) if x[0] == x[1]])) / len(targetLabels)\r\n return self.accuracy", "def test(self, test_instances, test_labels):\n scores = self.classifier.predict(test_instances)\n # TODO: print report", "def scores(self, y, y_pred):\n\n aucroc = 0.\n precision = 0.\n recall = 0.\n f1 = 0.\n aucroc_labs = np.zeros(self.datas[self.train_idx].n_labels)\n precision_labs = np.zeros(self.datas[self.train_idx].n_labels)\n recall_labs = np.zeros(self.datas[self.train_idx].n_labels)\n f1_labs = np.zeros(self.datas[self.train_idx].n_labels)\n label_ratios = np.mean(y, axis=0)\n\n if len(y) > 1:\n y_t = np.transpose(y)\n col_keep = np.ones(len(y_t), dtype=bool)\n for i, col_y in enumerate(y_t):\n if 0 not in col_y or 1 not in col_y:\n col_keep[i] = False\n\n if sum(col_keep) > 0:\n if not col_keep.all():\n y = np.transpose(y_t[col_keep])\n y_pred = np.transpose(np.transpose(y_pred)[col_keep])\n\n f1 = f1_score(y, self._round(y_pred), average=self.metrics_avg)\n s = f1_score(y, self._round(y_pred), average=None)\n f1_labs[col_keep] = s if sum(col_keep) > 1 else s[1]\n aucroc = roc_auc_score(y, y_pred, average=self.metrics_avg)\n aucroc_labs[col_keep] = roc_auc_score(y, y_pred, average=None)\n precision = precision_score(y, self._round(y_pred), average=self.metrics_avg)\n recall = recall_score(y, self._round(y_pred), average=self.metrics_avg)\n if sum(col_keep) > 1:\n precision_labs[col_keep] = precision_score(y, self._round(y_pred), average=None)\n recall_labs[col_keep] = recall_score(y, self._round(y_pred), average=None)\n else:\n precision_labs[col_keep] = precision_score(y, self._round(y_pred))\n recall_labs[col_keep] = recall_score(y, self._round(y_pred))\n elif self.verbose:\n print('*Cannot compute other metrics because no label in Truth has alternatives, only precision*')\n precision = precision_score(y, self._round(y_pred), average=self.metrics_avg)\n precision_labs = precision_score(y, self._round(y_pred), average=None)\n\n elif len(y) == 1:\n if self.verbose:\n print('*Cannot compute other 
metrics with %d samples, only precision*' % len(y))\n precision = precision_score(y, self._round(y_pred), average=self.metrics_avg)\n precision_labs = precision_score(y, self._round(y_pred), average=None)\n\n result = {\n 'aucroc': aucroc,\n 'precision': precision,\n 'recall': recall,\n 'f1': f1,\n 'aucroc_labs': aucroc_labs,\n 'precision_labs': precision_labs,\n 'recall_labs': recall_labs,\n 'f1_labs': f1_labs,\n 'label_ratios': label_ratios\n }\n\n return result", "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n correct = 0\n for i in range(len(targets)):\n if(predictions[i] == targets[i]):\n correct += 1\n accuracy = correct/len(targets)\n #raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "def evaluate(self, y_pred, y_test):\n for i in range(len(y_pred)):\n if y_pred[i] == y_test.iloc[i]:\n self.accuracy += 1\n self.accuracy = (self.accuracy/len(y_pred))", "def _detection_scores(inputs, gt_boxes, gt_labels, model):\n model = check_model('model', model, BlackModel)\n boxes_and_confi, pred_labels = model.predict(*inputs)\n det_scores = []\n correct_labels_num = []\n # repeat gt_boxes and gt_labels for all particles cloned from the same sample in PSOAttack/GeneticAttack\n if gt_boxes.shape[0] == 1 and boxes_and_confi.shape[0] > 1:\n gt_boxes = np.repeat(gt_boxes, boxes_and_confi.shape[0], axis=0)\n gt_labels = np.repeat(gt_labels, boxes_and_confi.shape[0], axis=0)\n iou_thres = 0.5\n for boxes, labels, gt_box, gt_label in zip(boxes_and_confi, pred_labels, gt_boxes, gt_labels):\n gt_box_num = gt_box.shape[0]\n score = 0\n box_num = boxes.shape[0]\n correct_label_flag = np.zeros(gt_label.shape)\n for i in range(box_num):\n pred_box = boxes[i]\n max_iou_confi = 0\n for j in range(gt_box_num):\n iou = calculate_iou(pred_box[:4], gt_box[j][:4])\n if labels[i] == gt_label[j] and iou > iou_thres and correct_label_flag[j] == 0:\n max_iou_confi = max(max_iou_confi, pred_box[-1] + iou)\n correct_label_flag[j] = 1\n score += max_iou_confi\n det_scores.append(score)\n correct_labels_num.append(np.sum(correct_label_flag))\n return np.array(det_scores), np.array(correct_labels_num)", "def get_accuracy_score(labels_true: np.ndarray, labels_pred: np.ndarray) -> float:\n check_vector_format(labels_true, labels_pred)\n mask = (labels_true >= 0) & (labels_pred >= 0)\n if np.sum(mask):\n return np.mean(labels_true[mask] == labels_pred[mask])\n else:\n raise ValueError('No sample with both true non-negative label and predicted non-negative label.')", "def evaluate(true_labels, predicted_labels):\n accuracy = np.round(metrics.accuracy_score(true_labels, predicted_labels), \n 2)\n precision = np.round(metrics.precision_score(true_labels, predicted_labels, \n average='weighted'), 2)\n recall = np.round(metrics.recall_score(true_labels, predicted_labels,\n average='weighted'), 2)\n f1 = np.round(metrics.f1_score(true_labels, predicted_labels, \n average='weighted'), 2)\n \n return accuracy, precision, recall, f1", "def eval_performance(weights, test_y, test_x):\n y_predicted = predict_labels(weights, test_x)\n accuracy = len(y_predicted[y_predicted == test_y]) / len(y_predicted)\n return accuracy", "def evaluate(labels, predictions):\n positive_count = 0\n positive = 0\n negative_count = 0\n negative = 0\n for i in range(len(labels)):\n if labels[i] == 1:\n positive_count+=1\n if predictions[i] == 1:\n positive +=1\n else:\n negative_count+=1\n if predictions[i] == 0:\n 
negative +=1\n\n sensitivity = positive / positive_count\n specificity = negative / negative_count\n\n return (sensitivity, specificity)", "def eval(self, test_docs, test_labels):\n assert len(test_docs)==len(test_labels)\n preds = [] # predicted labels\n for doc,y_gold in zip(test_docs,test_labels):\n y_pred = self.predict(doc)\n preds.append(y_pred)\n ev = Eval(test_labels, preds)\n return ev.accuracy()", "def accuracy_score(Y_true, Y_predict):\n \n true = 0\n\n for i in range(len(Y_predict)):\n if Y_true[i] == Y_predict[i]:\n true+=1\n acc = true/float(len(Y_true))* 100.0\n return acc", "def accuracy(pred, labels):\n pred = torch.sigmoid(pred)\n predicted = (pred > 0.5).int()\n correct = (predicted == labels).sum().item()\n return correct / labels.shape[0]", "def compute_scores(self):\n if self.num_classes == 2:\n score_1 = self.competition_metric(\n helmet_threshold=0.5,\n impact_threshold=0.5,\n )[1]\n\n score_2 = self.competition_metric(\n helmet_threshold=0.5,\n impact_threshold_ratio=0.5,\n )[1]\n\n score_3 = self.competition_metric(\n impact_threshold=0.5,\n )[1]\n else:\n score_1 = self.detection_metric(threshold=0.1)\n score_2 = self.detection_metric(threshold=0.25)\n score_3 = self.detection_metric(threshold=0.5)\n\n return score_1, score_2, score_3", "def accuracy_score(self, y_true=None, y_pred=None, labels=None, average=\"macro\", decimal=None, **kwargs):\n y_true, y_pred, binary, representor, decimal = self.get_processed_data(y_true, y_pred, decimal)\n matrix, imap, imap_count = cu.calculate_confusion_matrix(y_true, y_pred, labels, normalize=None)\n metrics = cu.calculate_single_label_metric(matrix, imap, imap_count)\n\n list_accuracy = np.array([item[\"accuracy\"] for item in metrics.values()])\n list_weights = np.array([item[\"n_true\"] for item in metrics.values()])\n list_tp = np.array([item['tp'] for item in metrics.values()])\n\n if average == \"micro\":\n accuracy = np.sum(list_tp) / np.sum(list_weights)\n elif average == \"macro\":\n accuracy = np.mean(list_accuracy)\n elif average == \"weighted\":\n accuracy = np.dot(list_weights, list_accuracy) / np.sum(list_weights)\n else:\n accuracy = dict([(label, np.round(item[\"accuracy\"], decimal)) for label, item in metrics.items()])\n return accuracy if type(accuracy) == dict else np.round(accuracy, decimal)", "def accuracy_compute(predictions, labels):\n with tf.name_scope('test_accuracy'):\n accu = 100 * np.sum(np.argmax(predictions, 1) == labels) / predictions.shape[0]\n tf.summary.scalar('test_accuracy', accu)\n return accu", "def evaluate(self, dataset):\n success = 0\n for sample, labelVector, label in dataset.tests:\n if self.guessLabel(sample) == label:\n success += 1\n return success / len(dataset.tests)", "def score_features(self, features, predictor, cv_fold, verbose=0):\n # First we optimise the hyper parameters:\n # data has 4 keys but only 2 (x_train and y_train) will be used for the optimization\n best_params = optimize_hyper_parameters(features, predictor, cv_fold, verbose)\n predictor.set_hyper_parameters(best_params)\n\n # Then we fit the predictor:\n predictor.fit(features)\n\n # Afterwards, we generate the prediction\n y_pred = predictor.predict(features)\n\n # Finally, we compute the metrics:\n metric_res = score_prediction(features['y_test'], y_pred)\n\n self.predictor = predictor\n\n return metric_res, best_params", "def predict(self, test_set, test_labels):\n\n with tf.Session() as self.tf_session:\n self.tf_saver.restore(self.tf_session, self.models_dir + self.model_name)\n return 
self.accuracy.eval({self.input_data: test_set, self.input_labels: test_labels})", "def accuracy(self, probabilities, labels):\n # TODO: calculate the batch accuracy\n count = 0\n for x in range(len(probabilities)):\n if np.argmax(labels[x]) == np.argmax(probabilities[x]):\n count += 1\n return count/len(probabilities)", "def score(self, phrases):\n pred = self.predict(phrases)\n return accuracy_score(target(phrases), pred)", "def label_accuracies(preds, labels):\n num_correct = num_correct_fun(preds, labels)\n return (num_correct / preds.size(0)) * 100.0", "def accuracy(probabilities: np.ndarray, labels: np.ndarray) -> float:\n # [batch_size]\n predictions = probabilities.argmax(axis=1)\n # [batch_size]\n labels = labels.argmax(axis=1)\n return (predictions == labels).astype(int).mean()", "def score(self, X_test, y_test):\r\n counter = 0\r\n sr = self.predict(X_test)\r\n for i in range(len(y_test)):\r\n if sr[i] == y_test[i]:\r\n counter += 1\r\n return counter / len(y_test)\r\n pass", "def label_accuracy_score(label_true, label_pred, n_class):\n hist = _fast_hist(label_true.flatten(), label_pred.flatten(), n_class)\n acc = np.diag(hist).sum() / hist.sum().astype(np.float64)\n acc_cls = np.diag(hist) / hist.sum(axis=1).astype(np.float64)\n acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist)).astype(np.float64)\n mean_iu = np.nanmean(iu)\n freq = hist.sum(axis=1) / hist.sum().astype(np.float64)\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n return acc, acc_cls, mean_iu, fwavacc", "def scoring(estimator, features_test, labels_test):\n pred = estimator.predict(features_test)\n p = metrics.precision_score(labels_test, pred, average='micro')\n r = metrics.recall_score(labels_test, pred, average='micro')\n if p > 0.3 and r > 0.3:\n return metrics.f1_score(labels_test, pred, average='macro')\n return 0", "def evaluate(model, eval_data, num_labels): \n # Turn on the evaluation state to ignore dropouts\n model.eval()\n results = [predict(model, x) for x, y in eval_data]\n f1_score, accuracy = get_metrics(np.array([y for x, y in eval_data]), results, num_labels)\n return f1_score, accuracy", "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n n_samples = targets.shape[0]\n _, y_pred = predictions.max(dim=1)\n accuracy = (y_pred == targets).sum().item() / n_samples\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "def calc_ac_score(labels_true, labels_pred):\n nclass = len(np.unique(labels_true))\n labels_size = len(labels_true)\n mat = labels_size * np.ones((nclass, nclass))\n \n idx = 0\n \n for i in range(labels_size):\n mat[labels_pred[i], labels_true[i]] -= 1.0\n \n munkres = Munkres()\n mapping = munkres.compute(mat)\n \n ac = 0.0\n\n for i in range(labels_size):\n val = mapping[labels_pred[i]][1]\n if val == labels_true[i]:\n ac += 1.0\n\n ac = ac / labels_size \n \n return ac", "def score_all(results):\n Y = np.concatenate([results['%dtest'%n] for n in range(10)])\n print score(np.concatenate([results['%dtrain'%n] for n in range(10)]))\n print score(np.concatenate([results['%dtest'%n] for n in range(10)]))\n class_counts = np.asarray([(Y[:,0]==n).sum() for n in range(10)])\n return confusion_matrix(Y[:,0],Y[:,1]), class_counts", "def score(self, x, y, **kwargs):\n y = np.searchsorted(self.classes_, y)\n kwargs = self.filter_sk_params(Sequential.evaluate, kwargs)\n\n loss_name = self.model.loss\n if hasattr(loss_name, 
'__name__'):\n loss_name = loss_name.__name__\n if loss_name == 'categorical_crossentropy' and len(y.shape) != 2:\n y = to_categorical(y)\n\n outputs = self.model.evaluate(x, y, **kwargs)\n if not isinstance(outputs, list):\n outputs = [outputs]\n for name, output in zip(self.model.metrics_names, outputs):\n if name == 'acc':\n return output\n raise ValueError('The model is not configured to compute accuracy. '\n 'You should pass `metrics=[\"accuracy\"]` to '\n 'the `model.compile()` method.')", "def score_calc(self, annotations, predictions):\n\n mean_probabilities_of_classes = np.expand_dims(np.mean(predictions, axis=0), axis=0)\n KL_d = predictions * (np.log(predictions + self.eps) - np.log(mean_probabilities_of_classes + self.eps))\n KL_D = KL_d.sum(axis=1)\n\n score = np.exp(np.mean(KL_D))\n return score", "def predict(self, model, arg):\n prediction = model.predict(arg)\n\n return prediction\n\n #def getAccuracyScore(self, n_splits):\n \"\"\"\n Gives an cross-validated accuracy score for the new model.\n\n Inputs:\n n_splits: number of sets to split the data into\n\n Returns:\n score: the accuracy score of the model.\n \"\"\"", "def accuracy(predictions, targets):\n correct_count = 0\n for prediction, target in zip(predictions, targets):\n if prediction == target:\n correct_count += 1\n return correct_count / len(predictions)", "def Evaluate_Prediction(prediction_mask, true_mask, feature_dict, \n test_name = 'Test'):\n \n # true_mask has 3 layers but they are redundant\n true_mask = true_mask[:,:,0]\n \n # Convert from Prob to 0,1,2...\n prediction_mask = prediction_mask.argmax(axis = 2) + 1 \n\n # Compute confusion matrix -- subtract 1 so that first label is \"0\" \n conf = custom_confusion_matrix(prediction_mask.flatten(), true_mask.flatten(), feature_dict)\n \n # Convert mask to proper shape for loss function - shape should have 4 dimensions with one-hot encoding\n true_mask = Expand_Mask(mask = true_mask, num_class = len(feature_dict)) ## to 0,1\n true_mask = np.expand_dims(true_mask, axis=0)\n true_mask = true_mask.astype(np.float)\n\n # Convert prediction into proper shape for loss function\n prediction_mask = Expand_Mask(mask = prediction_mask, num_class = len(feature_dict)) #to 0,1\n prediction_mask = np.expand_dims(prediction_mask, axis=0) \n prediction_mask = prediction_mask.astype(np.float)\n \n score = {'Test':test_name, \n 'Dice':Dice_Coef_Multilabel(true_mask, prediction_mask).numpy(), \n 'Accuracy':np.mean(tf.metrics.categorical_accuracy(true_mask, prediction_mask)), \n 'CE':np.mean(tf.metrics.categorical_crossentropy(true_mask, prediction_mask))}\n \n return [score, conf]", "def fit_predict_score(self, train_reviews: List[ParsedText],\n test_reviews: List[ParsedText], test_reviews_pred: List[ParsedText],\n **kwargs) -> List[ParsedText]:\n\n self.fit(train_texts=train_reviews, val_texts=test_reviews, **kwargs)\n test_reviews_pred = self.predict(test_reviews_pred)\n logging.info(f'Score: {self.score(texts=test_reviews, texts_pred=test_reviews_pred)}')\n return test_reviews_pred", "def score_my_predictions(self):\n resolution = self.resolution\n if resolution is None:\n last_community_prediction = self.prediction_timeseries[-1]\n resolution = last_community_prediction[\"distribution\"][\"avg\"]\n predictions = self.my_predictions[\"predictions\"]\n return [\n self.score_prediction(prediction, resolution) for prediction in predictions\n ]", "def labels_to_scores(labels):\n device = sp.get_device(labels)\n xp = device.xp\n with device:\n num_classes = labels.max() + 1\n 
scores = xp.zeros([len(labels), num_classes], dtype=np.float32)\n scores[xp.arange(len(labels)), labels] = 1\n\n return scores", "def get_accuracy(self, gold, predicted):\n # Exercise 3: calculate accuracy\n i = 0\n j = 0\n for labels in gold:\n if labels == predicted[i]:\n j +=1\n i +=1\n return j / i * 100", "def sentence_accuracy(references, predictions):\n count = 0.0\n match = 0.0\n for label, pred in zip(references, predictions):\n if label == pred:\n match += 1\n count += 1\n return 100 * match / count", "def score_model(self, model, test_training, test_target):\n\n target_prediction = model.predict(test_training)\n from sklearn.metrics import classification_report\n if(self.VERBOSE):\n print(classification_report(test_target, target_prediction))\n\n return [\n f1_score(test_target, target_prediction, average='weighted'),\n precision_score(test_target, target_prediction, average='weighted'),\n recall_score(test_target, target_prediction, average='weighted')\n ]", "def evaluate(self, predicted_df):\n logging.info(\"Evaluating model: {}\".format(self.model_type))\n y_true = predicted_df[\"user_label\"].as_matrix()\n y_pred = predicted_df[\"label\"].as_matrix()\n\n scores_cols = [col for col in predicted_df.columns if col.startswith(\"scores_\")]\n print(\"scores_cols: {}\".format(scores_cols))\n\n y_pred_scores = predicted_df[scores_cols].copy().fillna(value=0).as_matrix()\n print(\"predicted scores: {}\".format(y_pred_scores))\n y_true_scores = []\n for lab in predicted_df[\"user_label\"]:\n trues = [0 for _ in range(len(scores_cols))]\n if \"scores_\"+lab in scores_cols:\n trues[scores_cols.index(\"scores_\"+lab)] = 1\n y_true_scores.append(trues)\n print(\"true scores: {}\".format(y_true_scores))\n y_true_scores = np.array(y_true_scores)\n\n performance = {\"model\": self.model_type, \"description\": self.description}\n if 'categorical_accuracy' in self.metrics:\n logging.info(\"Calculating categorical accuracy for {}\".format(self))\n performance['categorical_accuracy'] = sklearn.metrics.accuracy_score(y_true,\n y_pred) # np.mean(y_pred == y_true)\n if 'fmeasure' in self.metrics:\n logging.info(\"Calculating fmeasure for {}\".format(self))\n performance['fmeasure'] = sklearn.metrics.f1_score(y_true, y_pred, average=self.metrics_average)\n if 'MRR' in self.metrics:\n logging.info(\"Calculating MRR for {}\".format(self))\n performance['MRR'] = sklearn.metrics.label_ranking_average_precision_score(y_true_scores, y_pred_scores)\n logging.info(\"Calculated performance: {}\".format(performance))\n print(performance)\n return pd.DataFrame(performance, index=[0])", "def score(self, X, y, **kwargs):\n from cuml.metrics.accuracy import accuracy_score\n\n if hasattr(self, \"handle\"):\n handle = self.handle\n else:\n handle = None\n\n preds = self.predict(X, **kwargs)\n return accuracy_score(y, preds, handle=handle)", "def update(self, labels, preds):\n #labels, preds = check_label_shapes(labels, preds, True)\n\n for label, pred_label in zip(labels, preds):\n if len(pred_label.shape) > 2:\n pred_label = mx.nd.reshape(pred_label, shape=[-1, pred_label.shape[-1]])\n label = mx.nd.reshape(pred_label, shape=[-1])\n\n # Using argpartition here instead of argsort is safe because\n # we do not care about the order of top k elements. 
It is\n # much faster, which is important since that computation is\n # single-threaded due to Python GIL.\n pred_label = np.argpartition(pred_label.asnumpy().astype('float32'), -self.top_k)\n label = label.asnumpy().astype('int32')\n check_label_shapes(label, pred_label)\n num_dims = len(pred_label.shape)\n mask = (label != self.ignore_label).astype(np.int32)\n num_samples = mask.sum()\n\n num_classes = pred_label.shape[1]\n top_k = min(num_classes, self.top_k)\n for j in range(top_k):\n num_correct = ((pred_label[:, num_classes - 1 - j].flat == label.flat) * mask).sum()\n self.sum_metric += num_correct\n self.global_sum_metric += num_correct\n\n self.num_inst += num_samples\n self.global_num_inst += num_samples", "def getAccuracy(self):\n\t\tcorrect = (self.testing[self.classLabel]==self.bestLabel).sum()\n\t\tself.accuracy = (correct/float(len(self.testing))) * 100.0", "def accuracy(targets: List[int],\n preds: Union[List[float], List[List[float]]],\n threshold: float = 0.5) -> float:\n if type(preds[0]) == list: # multiclass\n hard_preds = [p.index(max(p)) for p in preds]\n else:\n hard_preds = [1 if p > threshold else 0 for p in preds] # binary prediction\n\n return accuracy_score(targets, hard_preds)", "def evaluate_classifier(self, clf):\n\n clf = clf.fit(self.training_data_train_x, self.training_data_train_y)\n predicted = clf.predict(self.training_data_opt_x)\n\n correct = 0\n for i in range(len(self.training_data_opt_y)):\n if predicted[i] == self.training_data_opt_y[i]:\n correct += 1\n\n accuracy = correct / len(self.training_data_opt_y)\n\n return clf, accuracy", "def multiclass_accuracy(prediction, ground_truth):\n correct = sum(a == b for a, b in zip(prediction, ground_truth))\n\n accuracy = correct / len(ground_truth)\n\n return accuracy", "def classifiction_metric(preds, labels, label_list):\n\n acc = metrics.accuracy_score(labels, preds)\n\n labels_list = [i for i in range(len(label_list))]\n\n report = metrics.classification_report(\n labels, preds, labels=labels_list, target_names=label_list, digits=5, output_dict=True)\n\n return acc, report", "def score(y_values):\n y_act = y_values[:,0]\n y_pred = y_values[:,1]\n return (y_act==y_pred).mean()*100", "def ranked_accuracy(predictions, labels):\n rank1 = 0\n rank5 = 0\n\n for (pred, lbl) in zip(predictions, labels):\n pred = np.argsort(pred)[::-1]\n\n if lbl in pred[:5]:\n rank5+=1\n\n if lbl == pred[0]:\n rank1+=1\n\n rank1 /= float(len(labels))\n rank5 /= float(len(labels))\n\n return (rank1, rank5)", "def accuracy(targets: List[int], preds: Union[List[float], List[List[float]]], \n threshold: float = 0.5) -> float:\n if type(preds[0]) == list: # multiclass\n hard_preds = [p.index(max(p)) for p in preds]\n else:\n hard_preds = [1 if p > threshold else 0 for p in preds] # binary prediction\n\n return accuracy_score(targets, hard_preds)", "def accuracy(outputs, labels):\r\n outputs = np.argmax(outputs, axis=1)\r\n return np.sum(outputs == labels) / float(labels.size)", "def _compute_scores(self, triples):\n # compute scores as sum(s * p * o)\n scores = tf.reduce_sum(triples[0] * triples[1] * triples[2], 1)\n return scores", "def evaluate(inputs, labels):\n # Your code here.\n _, probs = forward(inputs)\n preds = predict(probs)\n trues = np.argmax(labels, axis=1)\n return np.mean(preds == trues)" ]
[ "0.71214116", "0.7117111", "0.71014607", "0.70404774", "0.70164144", "0.6998174", "0.6991339", "0.6985778", "0.69506323", "0.6912673", "0.69000816", "0.6899743", "0.6899066", "0.68485504", "0.6846754", "0.6846514", "0.684031", "0.6839982", "0.6834518", "0.6833685", "0.68276966", "0.6824302", "0.6810127", "0.6799235", "0.679791", "0.679791", "0.6794019", "0.67782927", "0.67747587", "0.6759102", "0.6743652", "0.6737129", "0.67353684", "0.67313176", "0.672891", "0.6726307", "0.67096114", "0.6704101", "0.6704101", "0.6703847", "0.6679464", "0.66658324", "0.66633207", "0.6661854", "0.6652531", "0.66431814", "0.66411036", "0.6638404", "0.66259986", "0.66190815", "0.6615855", "0.65951055", "0.6580363", "0.6573055", "0.6572204", "0.65696865", "0.6569428", "0.6560927", "0.6560281", "0.655167", "0.6544006", "0.653879", "0.6534058", "0.6533804", "0.6530238", "0.6522339", "0.65211403", "0.6519578", "0.6516537", "0.65084743", "0.6508212", "0.6503508", "0.6486394", "0.64733297", "0.64605314", "0.6447797", "0.6442129", "0.64330655", "0.6429217", "0.6426554", "0.64207906", "0.6417817", "0.64176923", "0.64168096", "0.64093727", "0.63968545", "0.63859063", "0.638531", "0.6369794", "0.63585395", "0.63502693", "0.6345446", "0.634452", "0.63418955", "0.63390946", "0.633783", "0.6334331", "0.6331731", "0.63285416", "0.6326367", "0.6319256" ]
0.0
-1
Return a head compatible with this task
def create_compatible_head( self, n_features: int, device: Optional[str] = None, ): head = nn.Linear(n_features, self.n_classes) xavier_initialize(head) if device is not None: head = head.to(device) return head
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def head(self) -> TaskHead:\n return self._model.head", "def head(self) -> tf.estimator.Head:\n\n task_type = self._problem_statement.tasks[0].type\n if task_type.HasField('one_dimensional_regression'):\n return tf.estimator.RegressionHead()\n num_classes = (\n self._tf_transform_output.num_buckets_for_transformed_feature(\n self.raw_label_key))\n if task_type.HasField('multi_class_classification'):\n return tf.estimator.MultiClassHead(num_classes)\n if task_type.HasField('binary_classification'):\n return tf.estimator.BinaryClassHead()\n raise ValueError('Invalid task type: {}'.format(task_type))", "def getHostHead(self):\n return self.host_head", "def head(self):\n return self._head", "def head(self):\n return self._head", "def first(self) -> Task:\n return self._tasks[0]", "def get_head_node_id() -> str:\n head_node_id = None\n for node in ray.nodes():\n if HEAD_NODE_RESOURCE_NAME in node[\"Resources\"] and node[\"Alive\"]:\n head_node_id = node[\"NodeID\"]\n break\n assert head_node_id is not None, \"Cannot find alive head node.\"\n\n return head_node_id", "def task_headrooms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OceanLaunchSpecSchedulingTaskTaskHeadroomArgs']]]]:\n return pulumi.get(self, \"task_headrooms\")", "def get(self) -> Task: # pragma: no cover\n raise NotImplementedError", "def build_rpn_head(cfg):\n name = cfg.MODEL.RPN.HEAD_NAME\n return RPN_HEAD_REGISTRY.get(name)(cfg)", "def task(self) -> base_model.BaseTask:\n return self._task", "def currenthead(self):\n return self.repo.head.object", "def get_head_vertex(self):\n return self.graph.vertices[self.head_vertex.vertex_number]", "def get_task_module(self, \n task: ExtendedTaskHandle\n ) -> Tuple[Optional[WorkflowHandle], int]:\n # Get the handle for the head workflow of the specified branch\n branch = self.projects.get_branch(\n project_id=task.project_id,\n branch_id=task.branch_id\n )\n if branch is None:\n return None, -1\n head = branch.get_head()\n if head is None or len(head.modules) == 0:\n return None, -1\n # Find module (searching from end of list)\n i = 0\n for m in reversed(head.modules):\n i += 1\n if m.identifier == task.module_id:\n return head, len(head.modules) - i\n return None, -1", "def head_host(self) -> str:\n return self.head_args.host if self.head_args else None", "def head_obj(cls, client, spec):\n return cls.set_header(client.get(cls.HEAD.format(**spec))[0])", "def head(self, *args):\n pass", "def __init__(self, head=None):\r\n self.head = head", "def head(self) -> ComponentTableHead:\n return ComponentTableHead(\n self.wait_for_elements_by_tag_name('tr')[0])", "def first(self):\r\n return self.__head", "def head(self):\n if self.isquiet():\n raise QueueEmpty()\n\n qcurr = self.base + \".\" + str(self.curr)\n assert os.path.exists(qcurr)\n qt = open(qcurr, \"r\")\n data = qt.read()\n qt.close()\n return data", "def get_head(self, key):\n path = os.path.join(self.directory, self.subdirectory, key)\n stat = os.stat(path)\n return Head(path=path, st_size=stat.st_size, st_mtime=stat.st_mtime)", "def first(self):\n return self.__head", "def head(self) -> object:\n if not self._head:\n raise EmptyListException(\"The list is empty.\")\n return self._head", "def get_first_task(self):\n return self.get_first_step().get_last_task()", "def get_task(self, key: str) -> Task:\n raise NotImplementedError", "def dummy_head_node(mocker):\n mocker.patch(\n \"pcluster.config.cluster_config.HeadNodeNetworking.availability_zone\",\n new_callable=PropertyMock(return_value=\"us-east-1a\"),\n )\n 
head_node_networking = HeadNodeNetworking(\n subnet_id=\"dummy-subnet-1\", proxy=Proxy(http_proxy_address=\"http://10.0.0.164:3129\")\n )\n head_node_networking.additional_security_groups = [\"additional-dummy-sg-1\"]\n head_node_dcv = Dcv(enabled=True, port=1024)\n head_node_imds = Imds(secured=True)\n ssh = HeadNodeSsh(key_name=\"test\")\n\n custom_actions = CustomActions(\n on_node_start=[\n CustomAction(script=\"https://tests1\", args=[\"arg1\", \"arg2\"]),\n CustomAction(script=\"https://tests2\", args=[\"arg1\", \"arg2\"]),\n ],\n on_node_updated=CustomAction(script=\"https://testus\", args=[\"arg1\", \"arg2\"]),\n on_node_configured=None,\n )\n\n head_node = HeadNode(\n instance_type=\"fake\",\n networking=head_node_networking,\n ssh=ssh,\n dcv=head_node_dcv,\n imds=head_node_imds,\n custom_actions=custom_actions,\n )\n\n return head_node", "def getHeadway(self):\n return self.headway", "def build_head(self, n_features, device=None):\n # By default this is a linear layer\n self.head = self.create_compatible_head(n_features, device)", "def do_HEAD(self):\n self.log.debug('do_HEAD called')\n self.HeadGet('HEAD')", "def HEAD(self, req):\n return self.GETorHEAD(req)", "def _get_head(wit_path):\n\n head = _get_references_data(wit_path)['HEAD']\n if len(head) == 40:\n return head\n return _get_references_data(wit_path)[head]", "def __init__(self, head=None):\n\n self.head = head", "def make_task(self):\n return Task()", "def head(array) -> T:\n return array[0]", "def chose_head(g,h,d): # g: graph; h: head; d: dependent\n if is_nonprojective_edge(g,h,d):\n return chose_head(g,g.nodes[h]['head'],d)\n return h", "def __init__(self):\n\n self._mh = MasterHead.get_head()", "def GetListHead(self, *args, **kwargs):\n pass", "def get_first(self):\n raise NotImplementedError(\"get_first: You should have implemented this method!\")", "def get_creator_from_head_node_tag(pd_series):\n\n # Check head node exists Head node\n if not pd.isna(pd_series[\"Head Node\"]):\n creator = get_creator_tag(pd_series[\"Head Node\"])\n else:\n # Return NA if no head node\n return np.nan\n\n # Check creator also isn't none\n if creator is not None:\n return creator\n\n return np.nan", "def __init__(self, head):\n self.head = head", "def __init__(self, head):\n self.head = head", "def __init__(self, head):\n self.head = head", "def __init__(self, head):\n self.head = head", "def task(self):\n return self._task", "def task(self):\n return self._task", "def task(self):\n return self._task", "def task(self):\n return self._task", "def task(self):\n return self._task", "def set_head(self, type: Type[TaskHead], **kwargs):\n\n self._config.head = TaskHeadConfiguration(type=type, **kwargs)\n self._model.set_head(self._config.head.compile(backbone=self.backbone))", "def head_insert(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"head_insert\")", "def head_insert(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"head_insert\")", "def loadHeadId(self):\n rec = self.db.selectById(self.tableName, 1)\n if rec:\n self.headId = rec['childId']\n return self.headId\n print('Error! 
No first sublist record found.')\n return None", "def __init__(self):\r\n self.head = None", "def head_port(self):\n return self.head_args.port[0] if self.head_args else None", "def get_node_from_hint(self, hint='blocked'):\n self.cond.acquire()\n node = None\n try:\n if hint == 'blocked':\n node = self.nodes[self.next_index]\n self.next_index = (self.next_index + 1) % len(self.nodes)\n\n elif hint == 'strided':\n previous_node = self.nodes[self.next_index - 1]\n start_index = self.next_index\n # Search for next node\n while (self.nodes[self.next_index] == previous_node):\n # Skip next\n self.next_index = (self.next_index + 1) % len(self.nodes)\n \n if self.next_index == start_index:\n # All entries in nodefile is identical\n break\n\n # Select node\n node = self.nodes[self.next_index]\n self.next_index = (self.next_index + 1) % len(self.nodes)\n \n #elif hint == 'auto':\n # raise InfoException(\"The auto hint is not implemented yet. Use 'blocked', 'strided' or 'local'.\")\n\n elif hint == 'local':\n node = 'localhost'\n\n else:\n raise InfoException(\"ClusterProcess does not support hint='\"+hint+\"'. Please replace with 'strided', 'blocked' or 'local'.\")\n finally:\n self.cond.release()\n\n return node", "def active_branch(self) -> Head:\n # reveal_type(self.head.reference) # => Reference\n return self.head.reference", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def get_previous_task():\n return _tasks[0] if len(_tasks) != 0 else None", "def first(self):\r\n if self.head == None: #check if first(head) node is empty\r\n return 'null' #if yes, then return null\r\n else: #if it is not empty\r\n return self.head.data #return the data of head node\r", "def head_insert(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"head_insert\")", "def check_task(self): \n return self.buffer[0]", "def connect(self):\n # sends User-Agent and Refferer (main page on the server) in the header, \n # it's necessary when the server blocks downloading via links from other resources\n headers = {'User-Agent': self.user_agent, 'Refferer': '{}://{}/'.format(self.url.protocol, self.url.host)}\n self.conn = self.protocol(self.url.host, timeout=self.timeout)\n self.conn.request('HEAD', self.url.request, headers=headers)\n response = self.conn.getresponse()\n\n # status 3xx\n if response.status // 100 == 3:\n location = response.getheader('Location')\n return self.redirect(location, response.status)\n\n if response.status != 200: # HTTP(S) error\n return TaskHeadError(self.url.host, response.status)\n\n file_size = int(response.getheader('Content-Length'))\n info = TaskHeadData(self.url.host, response.status, file_size)\n response.close()\n return info", "def Task(self):\n return self.create_task_cls()", "def getHeadId(self):\n if self.cursor:\n return self.cursor.childId\n return None", "def task_definition(self):\n return self._task_definition", "def requireTask(self, name):\n t = self.getTask(name)\n if t is None:\n raise Exception(\"Task %s not found in service\" % name)\n return t", "def get_head_node_ip(self):\n 
dashboard_root = self.get_by_id(DashboardDataRoot, self.ROOT_KEYNAME)\n if dashboard_root and dashboard_root.head_node_ip is not None:\n return dashboard_root.head_node_ip\n else:\n return self.update_head_node_ip()", "def getDomainHead(self):\n return self.domain_head", "def __init__(self):\n\n self.head = None", "def __init__(self):\n\n self.head = None", "def __init__(self):\n\n self.head = None", "def get_task(self, key: str) -> Task:\n if \"task\" not in self.task_graph.nodes[key]:\n coro_factory = self.task_graph.nodes[key][\"coroutine_factory\"]\n self.task_graph.nodes[key][\"task\"] = CoroutineTask(coro_factory)\n return self.task_graph.nodes[key][\"task\"]", "def headword(self) -> str:\n return self._headword", "def __init__(self, head):\n self.head = head", "def fork_task(self) -> \"Task\":\n new_task = Task(\n headers=self.headers,\n payload=self.payload,\n payload_persistent=self.payload_persistent,\n priority=self.priority,\n parent_uid=self.parent_uid,\n root_uid=self.root_uid,\n orig_uid=self.uid,\n )\n return new_task", "def retinanet_head_generator(params):\n head_params = params.model_params.architecture.head_params\n anchors_per_location = params.model_params.anchor.num_scales * len(params.model_params.anchor.aspect_ratios)\n return heads.RetinanetHead(\n params.model_params.architecture.min_level,\n params.model_params.architecture.max_level,\n params.model_params.architecture.num_classes,\n anchors_per_location,\n head_params.num_convs,\n head_params.num_filters,\n head_params.use_separable_conv,\n norm_activation=norm_activation_generator(params.model_params.norm_activation),\n )", "def HEAD(self, req):\r\n resp = req.get_response(self.app)\r\n\r\n return HTTPOk(headers=resp.headers)", "def HEAD(self, req):\n resp = req.get_response(self.app)\n\n return HTTPOk(headers=resp.headers)", "def _get_chunk_head(self, prepend_byte: bytes) -> models.ChunkHeadToken:\n if self._chunk_state is not None:\n raise exceptions.StreamStateError(\n \"Must finish last chunk before starting another\"\n )\n # One byte has already been read by this chunk, so don't add\n # another to the count.\n start_position = self.total_bytes_read\n [length] = struct.unpack('>I', prepend_byte + self._read(3))\n if length > PNG_MAX_CHUNK_LENGTH:\n fmt = (\n \"Chunk claims to be {actual} bytes long, must be \"\n \"no longer than {max}.\"\n )\n raise exceptions.PNGSyntaxError(fmt.format(\n actual=length,\n max=PNG_MAX_CHUNK_LENGTH\n ))\n type_code = self._read(4)\n if not models.PNG_CHUNK_TYPE_CODE_ALLOWED_BYTES.issuperset(type_code):\n raise exceptions.PNGSyntaxError(\n \"Invalid type code for chunk at byte {position}\".format(\n position=start_position,\n )\n )\n head = models.ChunkHeadToken(length, type_code, start_position)\n self._chunk_state = _SingleChunkState(head)\n return head", "def __init__(self):\n self.head = None", "def head(self,\n nodeid: str,\n attrs: List[str] = ['form']) -> Tuple[int, List[Any]]:\n\n if self.graph.nodes[nodeid]['domain'] != 'semantics':\n errmsg = 'Only semantics nodes have heads'\n raise ValueError(errmsg)\n\n is_performative = 'pred-root' in nodeid or\\\n 'arg-author' in nodeid or\\\n 'arg-addressee' in nodeid or\\\n 'arg-0' in nodeid\n \n if is_performative:\n errmsg = 'Performative nodes do not have heads'\n raise ValueError(errmsg)\n \n return [(self.graph.nodes[e[1]]['position'],\n [self.graph.nodes[e[1]][a] for a in attrs])\n for e, attr in self.instance_edges(nodeid).items()\n if attr['type'] == 'head'][0]", "def generateHead(self, headType):\n # load the 
multi-head models\n filePrefix, phase = ModelDict[self.style.body]\n headModel = loader.loadModel(\"phase_\" + str(phase) + filePrefix + \"heads\")\n\n # search for the appropriate parts\n headReferences = headModel.findAllMatches(\"**/\" + headType)\n for i in range(0, headReferences.getNumPaths()):\n headPart = self.instance(headReferences.getPath(i), \"modelRoot\",\n \"joint_head\")\n # set head texture if necessary\n if self.headTexture:\n headTex = loader.loadTexture(\"phase_\" + str(phase) + \"/maps/\" +\n self.headTexture)\n headTex.setMinfilter(Texture.FTLinearMipmapLinear)\n headTex.setMagfilter(Texture.FTLinear) \n headPart.setTexture(headTex, 1)\n\n # set head color if necessary\n if self.headColor:\n headPart.setColor(self.headColor)\n self.headParts.append(headPart)\n\n # Now remove the extra instance that was created in the\n # loadModelOnce call; we don't need it anymore now that we've\n # copied everything out.\n headModel.removeNode()", "def rpn_head_generator(params):\n head_params = params.rpn_head\n if head_params.anchors_per_location:\n logging.info('[Deprecation]: `rpn_head.anchors_per_location` '\n 'is no longer used.')\n anchor_aspect_ratios = len(params.anchor.aspect_ratios)\n anchor_num_scales = params.anchor.num_scales\n anchors_per_location = anchor_aspect_ratios * anchor_num_scales\n return heads.RpnHead(\n params.architecture.min_level,\n params.architecture.max_level,\n anchors_per_location,\n head_params.num_convs,\n head_params.num_filters,\n head_params.use_separable_conv,\n params.batch_norm_activation.activation,\n head_params.use_batch_norm,\n batch_norm_activation=batch_norm_activation_generator(\n params.batch_norm_activation))", "def head(cls, client, spec, headers=None):\n return cls(client, spec, headers)", "def __runHeadNode(self, nProcs, port=None):\n address = None\n # get local enviroment\n localEnv = os.environ.copy()\n localEnv[\"PYTHONPATH\"] = os.pathsep.join(sys.path)\n if self._parallelLib == ParallelLibEnum.ray:\n command = [\"ray\", \"start\", \"--head\"]\n if nProcs is not None:\n command.append(\"--num-cpus=\"+str(nProcs))\n if port is not None:\n command.append(\"--port=\"+str(port))\n outFile = open(\"ray_head.ip\", 'w')\n rayStart = utils.pickleSafeSubprocessPopen(command,shell=False,stdout=outFile, stderr=outFile, env=localEnv)\n rayStart.wait()\n outFile.close()\n if rayStart.returncode != 0:\n self.raiseAnError(RuntimeError, f\"RAY failed to start on the --head node! 
Return code is {rayStart.returncode}\")\n else:\n address = self.__getRayInfoFromStart(\"ray_head.ip\")\n elif self._parallelLib == ParallelLibEnum.dask:\n self.daskSchedulerFile = os.path.join(self.runInfoDict['WorkingDir'],\"scheduler.json\")\n if os.path.exists(self.daskSchedulerFile):\n self.raiseADebug(\"Removing \"+str(self.daskSchedulerFile))\n os.remove(self.daskSchedulerFile)\n\n tries = 0\n succeeded = False\n while not succeeded:\n #If there is a way to tell dask scheduler to automatically choose a\n # port, please change this to that.\n scheduler = utils.pickleSafeSubprocessPopen([\"dask\",\"scheduler\",\n \"--scheduler-file\",\n self.daskSchedulerFile,\n \"--port\",str(8786+tries)])\n\n waitCount = 0.0\n while not (os.path.exists(self.daskSchedulerFile) or scheduler.poll() is not None or waitCount > 20.0):\n time.sleep(0.1)\n waitCount += 0.1\n if os.path.exists(self.daskSchedulerFile) and scheduler.poll() is None:\n succeeded = True\n self._daskScheduler = scheduler\n self.raiseADebug(\"dask scheduler started with \"+str(self.daskSchedulerFile))\n break\n if scheduler.poll() is None:\n self.raiseAWarning(\"killing dask scheduler\")\n scheduler.terminate()\n tries += 1\n if tries > 20:\n succeeded = False\n self.raiseAWarning(\"failed to start dask scheduler\")\n self.daskSchedulerFile = None\n break\n if succeeded:\n #do equivelent of dask worker start in start_dask.sh:\n # dask worker --nworkers $NUM_CPUS --scheduler-file $SCHEDULER_FILE >> $OUTFILE\n outFile = open(os.path.join(self.runInfoDict['WorkingDir'],\n \"server_debug_\"+self.__getLocalHost()),'w')\n command = [\"dask\",\"worker\",\"--scheduler-file\",self.daskSchedulerFile]\n if nProcs is not None:\n command.extend((\"--nworkers\",str(nProcs)))\n headDaskWorker = utils.pickleSafeSubprocessPopen(command,shell=False,\n stdout=outFile, stderr=outFile, env=localEnv)\n return address", "def head(self, params=None):\n params = self.parameters(additional_parameters=params)\n res = head(self.endpoint_url, params=params)\n return Response(res)", "def change_model_head(model, custom_head, **kwargs):\n model.head = custom_head(model.head_nf, model.c_out, model.seq_len, **kwargs)\n return model" ]
[ "0.78820324", "0.7001775", "0.6367529", "0.6272498", "0.6272498", "0.6085029", "0.60775405", "0.60211575", "0.59077823", "0.58798844", "0.5837654", "0.578382", "0.5746318", "0.570547", "0.5696375", "0.56910676", "0.56888205", "0.56619275", "0.56599844", "0.5638932", "0.5638657", "0.56186014", "0.56113774", "0.56055164", "0.5604125", "0.56003344", "0.5575256", "0.556135", "0.5520985", "0.55000377", "0.54974407", "0.54933923", "0.5487523", "0.5483564", "0.5466212", "0.545646", "0.54327244", "0.5428336", "0.5421104", "0.541784", "0.54123956", "0.54123956", "0.54123956", "0.54123956", "0.5398276", "0.5398276", "0.5398276", "0.5398276", "0.5398276", "0.538625", "0.5344021", "0.5344021", "0.5343124", "0.5342407", "0.53186476", "0.5318448", "0.5312339", "0.5312168", "0.5312168", "0.5312168", "0.5312168", "0.5312168", "0.5312168", "0.5312168", "0.5312168", "0.5312168", "0.5312168", "0.5312168", "0.5312168", "0.5312168", "0.53102094", "0.5304989", "0.53012687", "0.5288396", "0.5282993", "0.5281304", "0.52644736", "0.5253963", "0.5253961", "0.523832", "0.52278244", "0.52251256", "0.52251256", "0.52251256", "0.52185", "0.52029216", "0.51950884", "0.518441", "0.51823604", "0.51700383", "0.5163788", "0.5153707", "0.5144955", "0.51406586", "0.51384085", "0.51379186", "0.5137444", "0.51255023", "0.5120249", "0.51161957" ]
0.5274176
76
Build this task's classification head.
def build_head(self, n_features, device=None): # By default this is a linear layer self.head = self.create_compatible_head(n_features, device)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def head(self) -> tf.estimator.Head:\n\n task_type = self._problem_statement.tasks[0].type\n if task_type.HasField('one_dimensional_regression'):\n return tf.estimator.RegressionHead()\n num_classes = (\n self._tf_transform_output.num_buckets_for_transformed_feature(\n self.raw_label_key))\n if task_type.HasField('multi_class_classification'):\n return tf.estimator.MultiClassHead(num_classes)\n if task_type.HasField('binary_classification'):\n return tf.estimator.BinaryClassHead()\n raise ValueError('Invalid task type: {}'.format(task_type))", "def head(self) -> TaskHead:\n return self._model.head", "def register_classification_head(self, name, num_classes=None, inner_dim=None, **kwargs):\n if name in self.classification_heads:\n prev_num_classes = self.classification_heads[name].out_proj.out_features\n prev_inner_dim = self.classification_heads[name].dense.out_features\n if num_classes != prev_num_classes or inner_dim != prev_inner_dim:\n logger.warning(\n 're-registering head \"{}\" with num_classes {} (prev: {}) '\n 'and inner_dim {} (prev: {})'.format(\n name, num_classes, prev_num_classes, inner_dim, prev_inner_dim\n )\n )\n self.classification_heads[name] = HuggingFaceBertClassificationHead(\n self.args.embed_dim, # self.args.encoder_embed_dim,\n inner_dim or self.args.embed_dim,\n num_classes,\n self.args.pooler_activation_fn,\n self.args.pooler_dropout,\n self.args.quant_noise_pq,\n self.args.quant_noise_pq_block_size,\n )", "def build_head(self):\n stages = [f'stage{i}' for i in range(1, 7)]\n for stage in stages:\n block = getattr(self.arch, stage)\n PAF, CFM = block.keys()\n PAF = build_blocks(block[PAF], 'head')\n CFM = build_blocks(block[CFM], 'head')\n setattr(self, f\"{stage}_PAF\", PAF)\n setattr(self, f\"{stage}_CFM\", CFM)", "def set_head(self, type: Type[TaskHead], **kwargs):\n\n self._config.head = TaskHeadConfiguration(type=type, **kwargs)\n self._model.set_head(self._config.head.compile(backbone=self.backbone))", "def __init__(self, top_n: int = 5, *args, **kwargs):\n super().__init__('Real time classification visualizer',\n *args, **kwargs)\n class_input_spec = self.inputs_specs['classification_data']\n self.class_names = class_input_spec['class_names']\n self.top_n = top_n", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(nn.ReLU(inplace=True), nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False), nn.Conv2d(C, 128, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 768, 2, bias=False), nn.ReLU(inplace=True))\n self.classifier = nn.Linear(768, num_classes)", "def make_task(layout_parameters=None):\n if not layout_parameters:\n layout_parameters = {\n 'image_url': 'http://herp.com/derp'\n }\n\n return CategorizationTaskFixture(**layout_parameters)", "def build(self, input_image, num_class):\n x = build_resnet(101)\n # add classifier\n x = Conv2D(num_class, (1, 1), kernel_initializer='he_normal', activation='linear', padding='valid', strides=(1, 1), kernel_regularizer=l2(weight_decay))(x)", "def __init__(self):\n self.label = \"Combine NNOutput Files \"\n self.description = \"Combines PNN, FUZ, and RBN files generated from partitions of the class.dta file.\"\n self.canRunInBackground = False\n self.category = \"Neural network\"", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, 
bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, channels, num_classes):\n super(AuxiliaryHead, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n # image size = 2 x 2\n nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False),\n nn.Conv2d(channels, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n nn.BatchNorm2d(768),\n nn.ReLU(inplace=True),\n )\n self.classifier = nn.Linear(768, num_classes)", "def __subtask_classification__(self,task_id,classification_tasks,marking_tasks,raw_classifications,aggregations):\n\n\n # go through the tools which actually have the followup questions\n for tool in classification_tasks[task_id]:\n\n # now go through the individual followup questions\n # range(len()) - since individual values will be either \"single\" or \"multiple\"\n\n for followup_question_index in range(len(classification_tasks[task_id][tool])):\n global_index = str(task_id)+\"_\" +str(tool)+\"_\"+str(followup_question_index)\n\n\n followup_classification = {}\n # this is used for inserting the results back into our running aggregation - which are based\n # on shapes, not tools\n shapes_per_cluster = {}\n\n # go through each cluster and find the corresponding raw classifications\n for subject_id in aggregations:\n if subject_id == \"param\":\n continue\n\n # has anyone done this task for this subject?\n if task_id in aggregations[subject_id]:\n # find the clusters which we have determined to be of the correct type\n # only consider those users who made the correct type marking\n # what shape did this particular tool make?\n shape = marking_tasks[task_id][tool]\n for cluster_index,cluster in aggregations[subject_id][task_id][shape + \" clusters\"].items():\n if cluster_index in [\"param\",\"all_users\"]:\n continue\n\n # what is the most likely tool for this cluster?\n most_likely_tool,_ = max(cluster[\"tool_classification\"][0].items(),key = lambda x:x[1])\n if int(most_likely_tool) != int(tool):\n continue\n\n\n # polygons and 
rectangles will pass cluster membership back as indices\n # ints => we can't case tuples\n if isinstance(cluster[\"cluster members\"][0],int):\n user_identifiers = zip(cluster[\"cluster members\"],cluster[\"users\"])\n else:\n user_identifiers = zip([tuple(x) for x in cluster[\"cluster members\"]],cluster[\"users\"])\n ballots = []\n\n for user_identifiers,tool_used in zip(user_identifiers,cluster[\"tools\"]):\n # did the user use the relevant tool - doesn't matter if most people\n # used another tool\n if tool_used == tool:\n\n followup_answer = raw_classifications[global_index][subject_id][user_identifiers]\n u = user_identifiers[1]\n ballots.append((u,followup_answer))\n\n followup_classification[(subject_id,cluster_index)] = deepcopy(ballots)\n shapes_per_cluster[(subject_id,cluster_index)] = shape\n\n\n followup_results = self.__task_aggregation__(followup_classification,global_index,{})\n assert isinstance(followup_results,dict)\n\n for subject_id,cluster_index in followup_results:\n shape = shapes_per_cluster[(subject_id,cluster_index)]\n # keyword_list = [subject_id,task_id,shape+ \" clusters\",cluster_index,\"followup_questions\"]\n new_results = followup_results[(subject_id,cluster_index)]\n # if this is the first question - insert\n # otherwise append\n\n if followup_question_index == 0:\n aggregations[subject_id][task_id][shape + \" clusters\"] [cluster_index][\"followup_question\"] = {}\n\n\n aggregations[subject_id][task_id][shape + \" clusters\"] [cluster_index][\"followup_question\"][followup_question_index] = new_results.values()[0]\n\n return aggregations", "def __init__(self, layer_list_info):\n super(DynaNet, self).__init__()\n self.layer_list_info = layer_list_info\n self.task_modules = nn.ModuleDict()\n self.classification_layers = nn.ModuleDict()\n self.module_generator = ModuleFactory(layer_list_info)\n self.task_module_name_path = {}\n self.nr_levels = len(layer_list_info)\n self.task_idx = None", "def _multi_class_head(n_classes,\n label_name=None,\n weight_column_name=None,\n enable_centered_bias=False,\n head_name=None,\n thresholds=None,\n metric_class_ids=None):\n if (n_classes is None) or (n_classes < 2):\n raise ValueError(\"n_classes must be > 1 for classification: %s.\" %\n n_classes)\n\n if n_classes == 2:\n if metric_class_ids:\n raise ValueError(\"metric_class_ids invalid for n_classes==2.\")\n return _BinaryLogisticHead(\n label_name=label_name,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n thresholds=thresholds)\n\n return _MultiClassHead(\n n_classes=n_classes,\n label_name=label_name,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n thresholds=thresholds,\n metric_class_ids=metric_class_ids)", "def generateHead(self, headType):\n # load the multi-head models\n filePrefix, phase = ModelDict[self.style.body]\n headModel = loader.loadModel(\"phase_\" + str(phase) + filePrefix + \"heads\")\n\n # search for the appropriate parts\n headReferences = headModel.findAllMatches(\"**/\" + headType)\n for i in range(0, headReferences.getNumPaths()):\n headPart = self.instance(headReferences.getPath(i), \"modelRoot\",\n \"joint_head\")\n # set head texture if necessary\n if self.headTexture:\n headTex = loader.loadTexture(\"phase_\" + str(phase) + \"/maps/\" +\n self.headTexture)\n headTex.setMinfilter(Texture.FTLinearMipmapLinear)\n headTex.setMagfilter(Texture.FTLinear) \n headPart.setTexture(headTex, 1)\n\n # set head color if 
necessary\n if self.headColor:\n headPart.setColor(self.headColor)\n self.headParts.append(headPart)\n\n # Now remove the extra instance that was created in the\n # loadModelOnce call; we don't need it anymore now that we've\n # copied everything out.\n headModel.removeNode()", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadCIFAR, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), # image size = 2 x 2\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadCIFAR, self).__init__()\n self.features = nn.Sequential(nn.ReLU(inplace=True), nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), nn.Conv2d(C, 128, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 768, 2, bias=False), nn.BatchNorm2d(768), nn.ReLU(inplace=True))\n self.classifier = nn.Linear(768, num_classes)", "def build_task_a(self, x, y, is_training, ext_wts=None):\n config = self.config\n global_step = self.global_step\n if config.backbone_class == 'resnet_backbone':\n bb_config = config.resnet_config\n else:\n assert False, 'Not supported'\n proto_config = config.protonet_config\n opt_config = config.optimizer_config\n num_classes_a = self._num_classes_a\n\n # Classification branch for task A.\n h_a = self._run_backbone(x, is_training=is_training, ext_wts=ext_wts)\n self._h_a = h_a\n h_shape = h_a.get_shape()\n h_size = 1\n for ss in h_shape[1:]:\n h_size *= int(ss)\n self._h_size = h_size\n\n if ext_wts is not None:\n w_class_a = weight_variable(\n [h_size, num_classes_a],\n init_method='numpy',\n dtype=self.dtype,\n init_param={'val': np.transpose(ext_wts['w_class_a'])},\n wd=bb_config.wd,\n name='w_class_a')\n b_class_a = weight_variable([],\n init_method='numpy',\n dtype=self.dtype,\n init_param={'val': ext_wts['b_class_a']},\n wd=0e0,\n name='b_class_a')\n else:\n w_class_a = weight_variable([h_size, num_classes_a],\n init_method='truncated_normal',\n dtype=self.dtype,\n init_param={'stddev': 0.01},\n wd=bb_config.wd,\n name='w_class_a')\n b_class_a = weight_variable([num_classes_a],\n dtype=self.dtype,\n init_method='constant',\n init_param={'val': 0.0},\n name='b_class_a')\n self._w_class_a = w_class_a\n self._b_class_a = b_class_a\n num_classes_a_dyn = tf.cast(tf.shape(b_class_a)[0], tf.int64)\n num_classes_a_dyn32 = tf.shape(b_class_a)[0]\n\n if proto_config.cosine_a:\n if proto_config.cosine_tau:\n if ext_wts is None:\n tau_init_val = 10.0\n else:\n tau_init_val = ext_wts['tau'][0]\n tau = weight_variable([],\n dtype=self.dtype,\n init_method='constant',\n init_param={'val': tau_init_val},\n name='tau')\n else:\n tau = tf.constant(1.0)\n\n w_class_a_norm = self._normalize(w_class_a, axis=0)\n h_a_norm = self._normalize(h_a, axis=1)\n dot = tf.matmul(h_a_norm, w_class_a_norm)\n if ext_wts is not None:\n dot += b_class_a\n logits_a = tau * dot\n else:\n logits_a = tf.matmul(h_a, w_class_a) + b_class_a\n\n self._prediction_a = logits_a\n self._prediction_a_all = self._prediction_a\n y_dense = tf.one_hot(y, num_classes_a)\n xent_a = tf.nn.softmax_cross_entropy_with_logits(\n logits=logits_a, labels=y_dense)\n xent_a = tf.reduce_mean(xent_a, name='xent')\n cost_a = xent_a\n self._cost_a = cost_a\n cost_a += self._decay()\n self._prediction_a = logits_a\n return logits_a", 
"def _construct_prediction_heads(self, num_classes, num_feature_outputs,\n class_prediction_bias_init,\n unit_height_conv=False):\n prediction_heads = {}\n prediction_heads[OBJECT_CENTER] = self._make_prediction_net_list(\n num_feature_outputs,\n num_classes,\n kernel_sizes=self._center_params.center_head_kernel_sizes,\n num_filters=self._center_params.center_head_num_filters,\n bias_fill=class_prediction_bias_init,\n name='center',\n unit_height_conv=unit_height_conv)\n\n if self._od_params is not None:\n prediction_heads[BOX_SCALE] = self._make_prediction_net_list(\n num_feature_outputs,\n NUM_SIZE_CHANNELS,\n kernel_sizes=self._od_params.scale_head_kernel_sizes,\n num_filters=self._od_params.scale_head_num_filters,\n name='box_scale',\n unit_height_conv=unit_height_conv)\n prediction_heads[BOX_OFFSET] = self._make_prediction_net_list(\n num_feature_outputs,\n NUM_OFFSET_CHANNELS,\n kernel_sizes=self._od_params.offset_head_kernel_sizes,\n num_filters=self._od_params.offset_head_num_filters,\n name='box_offset',\n unit_height_conv=unit_height_conv)\n\n if self._kp_params_dict is not None:\n for task_name, kp_params in self._kp_params_dict.items():\n num_keypoints = len(kp_params.keypoint_indices)\n prediction_heads[get_keypoint_name(\n task_name, KEYPOINT_HEATMAP)] = self._make_prediction_net_list(\n num_feature_outputs,\n num_keypoints,\n kernel_sizes=kp_params.heatmap_head_kernel_sizes,\n num_filters=kp_params.heatmap_head_num_filters,\n bias_fill=kp_params.heatmap_bias_init,\n name='kpt_heatmap',\n unit_height_conv=unit_height_conv)\n prediction_heads[get_keypoint_name(\n task_name, KEYPOINT_REGRESSION)] = self._make_prediction_net_list(\n num_feature_outputs,\n NUM_OFFSET_CHANNELS * num_keypoints,\n kernel_sizes=kp_params.regress_head_kernel_sizes,\n num_filters=kp_params.regress_head_num_filters,\n name='kpt_regress',\n unit_height_conv=unit_height_conv)\n\n if kp_params.per_keypoint_offset:\n prediction_heads[get_keypoint_name(\n task_name, KEYPOINT_OFFSET)] = self._make_prediction_net_list(\n num_feature_outputs,\n NUM_OFFSET_CHANNELS * num_keypoints,\n kernel_sizes=kp_params.offset_head_kernel_sizes,\n num_filters=kp_params.offset_head_num_filters,\n name='kpt_offset',\n unit_height_conv=unit_height_conv)\n else:\n prediction_heads[get_keypoint_name(\n task_name, KEYPOINT_OFFSET)] = self._make_prediction_net_list(\n num_feature_outputs,\n NUM_OFFSET_CHANNELS,\n kernel_sizes=kp_params.offset_head_kernel_sizes,\n num_filters=kp_params.offset_head_num_filters,\n name='kpt_offset',\n unit_height_conv=unit_height_conv)\n\n if kp_params.predict_depth:\n num_depth_channel = (\n num_keypoints if kp_params.per_keypoint_depth else 1)\n prediction_heads[get_keypoint_name(\n task_name, KEYPOINT_DEPTH)] = self._make_prediction_net_list(\n num_feature_outputs, num_depth_channel, name='kpt_depth',\n unit_height_conv=unit_height_conv)\n\n if self._mask_params is not None:\n prediction_heads[SEGMENTATION_HEATMAP] = self._make_prediction_net_list(\n num_feature_outputs,\n num_classes,\n kernel_sizes=self._mask_params.mask_head_kernel_sizes,\n num_filters=self._mask_params.mask_head_num_filters,\n bias_fill=self._mask_params.heatmap_bias_init,\n name='seg_heatmap',\n unit_height_conv=unit_height_conv)\n\n if self._densepose_params is not None:\n prediction_heads[DENSEPOSE_HEATMAP] = self._make_prediction_net_list(\n num_feature_outputs,\n self._densepose_params.num_parts,\n bias_fill=self._densepose_params.heatmap_bias_init,\n name='dense_pose_heatmap',\n unit_height_conv=unit_height_conv)\n 
prediction_heads[DENSEPOSE_REGRESSION] = self._make_prediction_net_list(\n num_feature_outputs,\n 2 * self._densepose_params.num_parts,\n name='dense_pose_regress',\n unit_height_conv=unit_height_conv)\n\n if self._track_params is not None:\n prediction_heads[TRACK_REID] = self._make_prediction_net_list(\n num_feature_outputs,\n self._track_params.reid_embed_size,\n name='track_reid',\n unit_height_conv=unit_height_conv)\n\n # Creates a classification network to train object embeddings by learning\n # a projection from embedding space to object track ID space.\n self.track_reid_classification_net = tf.keras.Sequential()\n for _ in range(self._track_params.num_fc_layers - 1):\n self.track_reid_classification_net.add(\n tf.keras.layers.Dense(self._track_params.reid_embed_size))\n self.track_reid_classification_net.add(\n tf.keras.layers.BatchNormalization())\n self.track_reid_classification_net.add(tf.keras.layers.ReLU())\n self.track_reid_classification_net.add(\n tf.keras.layers.Dense(self._track_params.num_track_ids))\n if self._temporal_offset_params is not None:\n prediction_heads[TEMPORAL_OFFSET] = self._make_prediction_net_list(\n num_feature_outputs, NUM_OFFSET_CHANNELS, name='temporal_offset',\n unit_height_conv=unit_height_conv)\n return prediction_heads", "def __init__(self, classification_path):\n # TODO: Rodar novamente o KNN com a particao crisp 'otima' para reavaliar os valores de K\n self.data = list()\n self.class_data = np.loadtxt(classification_path, dtype=int)\n self.mfeat_fac_classifier = self.build_classifier(15, 0)\n self.mfeat_fou_classifier = self.build_classifier(13, 1)\n self.mfeat_kar_classifier = self.build_classifier(13, 2)", "def _multi_label_head(n_classes,\n label_name=None,\n weight_column_name=None,\n enable_centered_bias=False,\n head_name=None,\n thresholds=None,\n metric_class_ids=None):\n if n_classes < 2:\n raise ValueError(\"n_classes must be > 1 for classification.\")\n return _MultiLabelHead(\n n_classes=n_classes,\n label_name=label_name,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n thresholds=thresholds,\n metric_class_ids=metric_class_ids)", "def _build_ner_head(self, bert_out):\n use_crf = self.config[\"model\"][\"ner\"][\"use_crf\"]\n num_labels = self.config[\"model\"][\"ner\"][\"num_labels\"]\n\n # dropout\n if (self.birnn_ner is None) or (self.config[\"model\"][\"ner\"][\"rnn\"][\"dropout\"] == 0.0):\n x = self.bert_dropout(bert_out, training=self.training_ph)\n else:\n x = bert_out\n\n # birnn\n if self.birnn_ner is not None:\n sequence_mask = tf.sequence_mask(self.num_pieces_ph)\n x = self.birnn_ner(x, training=self.training_ph, mask=sequence_mask)\n\n # pieces -> tokens\n # сделано так для того, чтобы в ElmoJointModel не нужно было переопределять данный метод\n if self.first_pieces_coords_ph is not None:\n x = tf.gather_nd(x, self.first_pieces_coords_ph) # [N, num_tokens_tokens, bert_dim or cell_dim * 2]\n\n # label logits\n logits = self.dense_ner_labels(x)\n\n # label ids\n if use_crf:\n with tf.variable_scope(\"crf\", reuse=tf.AUTO_REUSE):\n transition_params = tf.get_variable(\"transition_params\", [num_labels, num_labels], dtype=tf.float32)\n pred_ids, _ = tf.contrib.crf.crf_decode(logits, transition_params, self.num_tokens_ph)\n else:\n pred_ids = tf.argmax(logits, axis=-1)\n transition_params = None\n\n return logits, pred_ids, transition_params", "def __init__(self, classification, extras=[]):\n self.model_list = []\n self._generate_model_list(classification)\n 
self.model_list.extend(extras)\n self.classification = classification", "def __init__(self, hparams):\n super(ImagenetTransferLearning, self).__init__()\n self.hparams = hparams\n self.feature_extractor = models.mobilenet_v2(pretrained=True)\n self.feature_extractor.eval()\n\n # Establish classifier\n # self.layer_1 = torch.nn.Linear(hparams[\"input_size\"], 128)\n self.layer_1 = torch.nn.Linear(1000, 128)\n self.layer_2 = torch.nn.Linear(128, 256)\n self.layer_3 = torch.nn.Linear(256, hparams[\"targets\"])", "def build_simple_cnn_text_classifier(\n tok2vec, nr_class, exclusive_classes: bool = ..., **cfg\n):\n ...", "def multi_label_cls_head__post_process(ctx, self, pred, **kwargs):\n return pred", "def build_step(self):\n\n pass", "def __init__(self,\n top_k=1, # type: int\n conf_threshold=None, # type: Optional[thelper.typedefs.Number]\n class_names=None, # type: Optional[List[AnyStr]]\n target_name=None, # type: Optional[AnyStr]\n viz_count=0, # type: int\n report_count=None, # type: Optional[int]\n log_keys=None, # type: Optional[List[AnyStr]]\n force_softmax=True, # type: bool\n format=None, # type: Optional[AnyStr]\n ): # type: (...) -> None\n assert isinstance(top_k, int) and top_k > 0, \"invalid top-k value\"\n assert conf_threshold is None or (isinstance(conf_threshold, (float, int)) and 0 < conf_threshold <= 1), \\\n \"classification confidence threshold should be 'None' or float in ]0, 1]\"\n assert isinstance(viz_count, int) and viz_count >= 0, \"invalid image count to visualize\"\n assert report_count is None or (isinstance(report_count, int) and report_count >= 0), \\\n \"invalid report sample count\"\n assert log_keys is None or isinstance(log_keys, list), \"invalid list of sample keys to log\"\n self.top_k = top_k\n self.target_name = target_name\n self.target_idx = None\n self.conf_threshold = conf_threshold\n self.viz_count = viz_count\n self.report_count = report_count\n self.log_keys = log_keys if log_keys is not None else []\n self.force_softmax = force_softmax\n self.score = None\n self.true = None\n self.meta = None\n ClassNamesHandler.__init__(self, class_names)\n FormatHandler.__init__(self, format)", "def build_rpn_head(cfg):\n name = cfg.MODEL.RPN.HEAD_NAME\n return RPN_HEAD_REGISTRY.get(name)(cfg)", "def __init__(self, root, which_set, vocab, transform=None):\n self.root = root\n self.img_root = os.path.join(root, 'Img')\n self.ann = json.load(open(os.path.join(root, '{}_labels.json'.format(which_set)),'r'))\n\n self.vocab = vocab\n self.transform = transform\n self.img_list = list(self.ann.keys())\n # transfer categories id to labels\n self.cat2label = {}\n for i, k in enumerate(label_corpus):\n self.cat2label[k] = i\n\n self.num_cats = len(self.cat2label) \n\n # vgnome has varied number of annotations [1, 20], average 5.73\n # we still choose five as the parameter. 
It can be adjusted later on\n self.num_ann_onebatch = 5\n self.ids = [a for a in range(len(self.ann))]\n\n print('\\t {} train samples from {} set'.format(len(self.ids), which_set ))\n print('\\t {} of categories'.format(self.num_cats))", "def classification(self) -> 'outputs.CaseClassificationResponse':\n return pulumi.get(self, \"classification\")", "def build_model(cls, args, task):\n # print(\"In build_model !!!\")\n default_architecture(args)\n assert args.load_hf_bert_from != ''\n encoder = HuggingFaceBertEncoder(args, task.dictionary)\n\n return cls(args, encoder, task)", "def __init__(self, anchors: torch.Tensor, num_classes: int):\n super(YoloHead, self).__init__()\n self.anchors :torch.Tensor = anchors\n self.num_classes : int = num_classes", "def _build(self, s_in: Shape, s_out: Shape) -> Shape:\n self.head_module = BasicDartsAuxHead(init_pool_stride=3)\n return self.head_module.build(s_in, s_out.num_features())", "def GetClassification(self, *args, **kwargs):\n pass", "def classify(self):\r\n Classify(os.path.join(self.__path,'test.csv'),self.__rang,self.__numeric,self.__statistics,self.__k,self.__classes,self.__abs_n,self)\r\n self.view.Build_Button.configure(state=\"active\")", "def __init__(self):\n super(SCNN, self).__init__()\n\n # Linear classifier.\n self.inplanes = 128\n self._norm_layer = nn.BatchNorm2d\n self.dilation = 1\n self.groups = 1\n self.base_width = 64\n\n self.num_class = 125\n backbone = torchvision.models.resnet34(pretrained=True)\n self.shared_features = nn.Sequential(*list(backbone.children())[0:6])\n #self.realistic_head = nn.Sequential(*list(backbone.children())[6:8])\n # self.synthetic_head = nn.Sequential(nn.Conv2d(128, 128, 3, 2, 1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n # nn.Conv2d(128, 128, 3, 1, 1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n # nn.Conv2d(128, 256, 3, 2, 1), nn.BatchNorm2d(256), nn.ReLU(inplace=True),\n # nn.Conv2d(256, 256, 3, 1, 1), nn.BatchNorm2d(256), nn.ReLU(inplace=True))\n\n self.synthetic_head1 = self._make_layer(BasicBlock, 128, 1, stride=2, dilate=False)\n self.synthetic_head2 = self._make_layer(BasicBlock, 256, 1, stride=2, dilate=False)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.classifier = nn.Linear(256, self.num_class)\n\n for m in self.synthetic_head1.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n for m in self.synthetic_head2.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n weight_init(self.classifier)\n\n for param in self.shared_features.parameters():\n param.requires_grad = False", "def build_step(self):\n pass", "def build_step(self):\n pass", "def build(self):\n raise NotImplementedError(\"This should have been implemented.\")", "def build_head(self, root):\n head = ET.SubElement(root, \"head\")\n for key, val, attr in self.headers:\n if val:\n ET.SubElement(head, # pylint: disable-msg=W0142\n key, **attr).text = val\n else:\n ET.SubElement(head, key, **attr) # pylint: disable-msg=W0142\n ET.SubElement(head, \"title\").text = self.title", "def train_classifier(self, class_id):\n raise NotImplementedError(\"Classifier training must be implemented first.\")", "def build(self):\n if self.predictor_constructor is None:\n 
print('[ERROR] build_predictor_fn not set, skip.')\n else:\n if hasattr(self, 'predictor'):\n print(\n \"[WARNING] predictor is already set, predictor is overridden\")\n del self.predictor\n with self.init_scope():\n self.predictor = self.predictor_constructor(\n **self.filter_sk_params(self.predictor_constructor)\n )\n self.update_device(self.device)", "def get_task(self, partition):\n train_indices, train_labels, test_indices, test_labels = [], [], [], []\n classes = list(partition.keys())\n sampled_classes = random.sample(classes, self.num_classes)\n random.shuffle(sampled_classes) # the same classes given a different label ordering is a new task\n for label, cls in zip(range(self.num_classes), sampled_classes):\n class_indices = random.sample(partition[cls], self.num_samples_per_class)\n train_indices.extend(class_indices[:self.num_train_samples_per_class])\n test_indices.extend(class_indices[self.num_train_samples_per_class:])\n train_labels.extend([label for i in range(self.num_train_samples_per_class)])\n test_labels.extend([label for i in range(self.num_samples_per_class - self.num_train_samples_per_class)])\n return train_indices, train_labels, test_indices, test_labels", "def __init__(self, head=None):\r\n self.head = head", "def _build(self, s_in: Shape, s_out: Shape) -> Shape:\n self.head_module = BasicDartsAuxHead(init_pool_stride=2)\n return self.head_module.build(s_in, s_out.num_features())", "def __init__(self):\n self.model = self._get_model()\n\n # NOTE: The order of this list hardcoded here, and needs to be changed when re-training the model!\n # When exporting the model in tflite format, the model_spec is lost, so we cannot do it like that:\n # classes = ['???'] * model.model_spec.config.num_classes\n # label_map = model.model_spec.config.label_map\n # for label_id, label_name in label_map.as_dict().items():\n # classes[label_id-1] = label_name\n self.classes = ['Baked Goods', 'Salad', 'Cheese', 'Seafood', 'Tomato']", "def __init__(self , model_file_name ):\n logging.set_verbosity(logging.ERROR)\n with TheTFGraph.as_default():\n with TheTFSession.as_default():\n self.model = keras.models.load_model( model_file_name + \".hdf5\" , compile=False )\n JSON = json.load( open(model_file_name + \".json\" ) )\n self.all_sites = list(JSON['all_sites'])\n self.all_errors = list(JSON['all_errors'])\n self.all_actions = list(JSON['all_actions'])\n self.IsBinary = bool(JSON['IsBinary'])\n self.TiersOnly = bool(JSON['TiersOnly'])\n self.Task = Task({} , \"TaskLoader\" , self)\n self.Name = model_file_name.split('/')[-1]\n self.ModelID = int( JSON['model'] )\n self.InputTrainingDataID = int( JSON['trainingdata'])\n\n self.Prediction = Prediction.Prediction( self.ModelID , self.InputTrainingDataID )", "def __init__(self, configuration, task, forward=None):\n self.conf = read_configuration(configuration)\n self.name = self.conf['name']\n self.labels = True if 'labels' in self.conf else False\n self.task = task\n self.client = self.task.client\n self.forward = forward", "def __init__(self, **config):\n super(Classifier, self).__init__()\n self.input_dim_drug = config['hidden_dim_drug']\n self.input_dim_protein = config['hidden_dim_protein']\n self.hidden_dims = config['cls_hidden_dims']\n self.visual_attention=config['visual_attention']\n dims = [self.input_dim_drug + self.input_dim_protein] + self.hidden_dims + [2]\n if config['attention']:\n if config['concatenation']:\n dims[0]+=config['cnn_target_filters'][-1]\n else:\n dims[0]=self.input_dim_drug+config['cnn_target_filters'][-1]\n 
self.predictor = nn.ModuleList([nn.Linear(dims[i], dims[i + 1]) for i in range(len(self.hidden_dims)+1)])\n self.dropout = nn.Dropout(0.25)\n self._initialize()", "def __init__(self):\n\n self._mh = MasterHead.get_head()", "def define_classifiers(self):\n raise NotImplementedError(\"Classifier options must be defined first.\")", "def build_classifier(self, merged_tweets_of_authors, truths, author_ids, original_tweet_lengths):\n\n\t\tprint(\"Building custom classifier\")\n\n\t\tdocs_train, docs_test, y_train, y_test = self.get_train_test(merged_tweets_of_authors, truths, author_ids, original_tweet_lengths)\n\t\tX_train, X_test, feature_names = self.extract_features(docs_train, docs_test, dim_reduce=False)\n\t\tself.cross_validate_model(X_train, y_train)\n\t\tself.train_and_test_model(X_train, y_train, X_test, y_test)\n\n\t\t# Log run time\n\t\tprint(\"%.2f seconds: Run finished\\n\" % time.process_time())", "def __init__(self, head=None):\n\n self.head = head", "def __init__(self, type='Labeler', description=None, ground_truth_label_facet=None, name=None, predicted_label_facet=None, source_dataset_id=None):\n\n self._type = None\n self._description = None\n self._ground_truth_label_facet = None\n self._name = None\n self._predicted_label_facet = None\n self._source_dataset_id = None\n\n self.type = type\n if description is not None:\n self.description = description\n self.ground_truth_label_facet = ground_truth_label_facet\n if name is not None:\n self.name = name\n self.predicted_label_facet = predicted_label_facet\n self.source_dataset_id = source_dataset_id", "def config_task(self) -> None:\n weights = self.hyperparams[\"weights\"]\n\n if self.hyperparams[\"model\"] == \"unet\":\n self.model = smp.Unet(\n encoder_name=self.hyperparams[\"backbone\"],\n encoder_weights=\"imagenet\" if weights is True else None,\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"model\"] == \"deeplabv3+\":\n self.model = smp.DeepLabV3Plus(\n encoder_name=self.hyperparams[\"backbone\"],\n encoder_weights=\"imagenet\" if weights is True else None,\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"model\"] == \"fcn\":\n self.model = FCN(\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n num_filters=self.hyperparams[\"num_filters\"],\n )\n else:\n raise ValueError(\n f\"Model type '{self.hyperparams['model']}' is not valid. \"\n f\"Currently, only supports 'unet', 'deeplabv3+' and 'fcn'.\"\n )\n\n if self.hyperparams[\"loss\"] == \"ce\":\n ignore_value = -1000 if self.ignore_index is None else self.ignore_index\n\n class_weights = None\n if isinstance(self.class_weights, torch.Tensor):\n class_weights = self.class_weights.to(dtype=torch.float32)\n elif hasattr(self.class_weights, \"__array__\") or self.class_weights:\n class_weights = torch.tensor(self.class_weights, dtype=torch.float32)\n\n self.loss = nn.CrossEntropyLoss(\n ignore_index=ignore_value, weight=class_weights\n )\n elif self.hyperparams[\"loss\"] == \"jaccard\":\n self.loss = smp.losses.JaccardLoss(\n mode=\"multiclass\", classes=self.hyperparams[\"num_classes\"]\n )\n elif self.hyperparams[\"loss\"] == \"focal\":\n self.loss = smp.losses.FocalLoss(\n \"multiclass\", ignore_index=self.ignore_index, normalized=True\n )\n else:\n raise ValueError(\n f\"Loss type '{self.hyperparams['loss']}' is not valid. 
\"\n f\"Currently, supports 'ce', 'jaccard' or 'focal' loss.\"\n )\n\n if self.hyperparams[\"model\"] != \"fcn\":\n if weights and weights is not True:\n if isinstance(weights, WeightsEnum):\n state_dict = weights.get_state_dict(progress=True)\n elif os.path.exists(weights):\n _, state_dict = utils.extract_backbone(weights)\n else:\n state_dict = get_weight(weights).get_state_dict(progress=True)\n self.model.encoder.load_state_dict(state_dict)\n\n # Freeze backbone\n if self.hyperparams.get(\"freeze_backbone\", False) and self.hyperparams[\n \"model\"\n ] in [\"unet\", \"deeplabv3+\"]:\n for param in self.model.encoder.parameters():\n param.requires_grad = False\n\n # Freeze decoder\n if self.hyperparams.get(\"freeze_decoder\", False) and self.hyperparams[\n \"model\"\n ] in [\"unet\", \"deeplabv3+\"]:\n for param in self.model.decoder.parameters():\n param.requires_grad = False", "def get_classification(self, image):\n #TODO implement light color prediction\n \n with self.graph.as_default():\n img_expand = np.expand_dims(image, axis=0)\n start = datetime.now() #start = datetime.datetime.now() if import datetime\n (boxes, scores, classes, num_detections) = self.sess.run(\n [self.boxes, self.scores, self.classes, self.num_detections],\n feed_dict={self.image_tensor: img_expand}) \n end = datetime.now() #end = datetime.datetime.now()\n c = end - start\n #rospy.logwarn(\"tl_classifier - Image predicted in: {0} seconds\".format(c.total_seconds()))\n #print(c.total_seconds())\n\n boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes).astype(np.int32)\n\n print('tl_classifier - CLASSES: 1=Green, 2=Red, 3=Yellow, 4=Unknown: ', classes[0])\n #print('tl_classifier - SCORES: ', scores[0])\n #print('tl_classifier - TrafficLight.GREEN: ', TrafficLight.GREEN) = 2 CLASSES: 1\n #print('tl_classifier - TrafficLight.RED: ', TrafficLight.RED) = 0 CLASSES: 2\n #print('tl_classifier - TrafficLight.YELLOW: ', TrafficLight.YELLOW) = 1 CLASSES: 3\n #print('tl_classifier - TrafficLight.UNKNOWN: ', TrafficLight.UNKNOWN) = 4 CLASSES: 4\n\n if scores[0] > self.threshold:\n if classes[0] == 1:\n print('GREEN')\n return TrafficLight.GREEN\n elif classes[0] == 2:\n print('RED')\n return TrafficLight.RED\n elif classes[0] == 3:\n print('YELLOW')\n return TrafficLight.YELLOW\n else:\n rospy.logwarn(\"Light: UNKNOWN\")\n\n \n return TrafficLight.UNKNOWN", "def __init__(self, name, num_classes, X_mask=None) -> None:\n super(Baseline, self).__init__()\n\n # Get the featurizer, hidden_size and pretrained module.\n hidden_size, self.pretrained, self.phi = get_phi(name, X_mask)\n\n # Define the classifier layer.\n self.joint = nn.Linear(hidden_size, num_classes)", "def register_dependency_parse_head(\n model,\n args,\n name,\n num_classes=None,\n inner_dim=None,\n **kwargs,\n):\n if name in model.classification_heads:\n prev_num_classes = model.classification_heads[\n name].out_proj.out_features\n prev_inner_dim = model.classification_heads[name].dense.out_features\n if num_classes != prev_num_classes or inner_dim != prev_inner_dim:\n print(\n 'WARNING: re-registering head \"{}\" with num_classes {} (prev: {}) '\n \"and inner_dim {} (prev: {})\".format(\n name,\n num_classes,\n prev_num_classes,\n inner_dim,\n prev_inner_dim,\n ))\n model.classification_heads[name] = DependencyParseHead(\n args.encoder_embed_dim,\n max_position=args.max_positions,\n num_classes0=args.num_classes0,\n )", "def __init__(self):\n self.label = \"Partition NNInput Files\"\n self.description = \"Partitions Neural 
Network class.dta of more than 200,000 records into files of 200,000 or less.\"\n self.canRunInBackground = False\n self.category = \"Neural network\"", "def __init__(self, classes, data_size):\r\n self.classes = classes\r\n self.data_size = data_size\r\n self.conditional_prob = {class_:{} for class_ in classes} # Conditional Probability Table for storing parameters useful to compute P(feat|class_)\r\n self.class_prob = {} # Stores the priors\r", "def _before_task(self, train_loader, val_loader):\n self._network.add_classes(self._task_size)\n\n self._task_idxes.append([self._n_classes + i for i in range(self._task_size)])\n\n self._n_classes += self._task_size\n print(\"Now {} examplars per class.\".format(self._m))", "def build (self):\n raise NotImplementedError", "def build(self):\n raise NotImplementedError", "def __init__(self, nclasses, device):\n super(HybridNN, self).__init__(nclasses, device)\n self.data_dev = qml.device(device, wires=self.req_qub_out)\n self.device = device\n self.model_dev = None\n self.nn = None\n self.bias = True", "def get_classification(self, image):\n # return TrafficLight.RED\n # TODO implement light color prediction\n # creating an image object \n img_np = np.array(image) \n\n # convert np array to tensor\n input_tensor = tf.convert_to_tensor(img_np)\n\n # The model expects a batch of images, so add an axis with `tf.newaxis`.\n input_tensor = input_tensor[tf.newaxis, ...]\n\n\n detections = self.loaded(input_tensor)\n\n num_detections = int(detections.pop('num_detections'))\n\n # detection_classes should be ints.\n detections_dict = {key: value[0, :num_detections].numpy() for key, value in detections.items()}\n\n\n # detection_classes should be ints.\n detections_dict['detection_classes'] = detections_dict['detection_classes'].astype(np.int64)\n\n label_id_offset = 1\n\n # DEBUG - can do it in a cleaner way :0\n tl_classes = {3: 'green', 2: 'red'}\n top_classes_prediction = list(detections_dict['detection_classes']+label_id_offset)[:5] \n #print(top_classes_prediction)\n for i in range(len(top_classes_prediction)):\n if top_classes_prediction[i] == 2:\n top_classes_prediction[i] = 'green'\n elif top_classes_prediction[i] == 3:\n top_classes_prediction[i] = 'red'\n\n\n #print(\"--------->\", image_path, \"<-----------\")\n #print( top_classes_prediction ) \n #print(detections_dict['detection_scores'][:5], '\\n' )\n\n # basic red tl logic\n if top_classes_prediction[0] == 'red' and detections_dict['detection_scores'][0] >= 0.60:\n #print(\"-------------> RED TRAFFIC LIGHT <----------------\\n\")\n self.current_light = TrafficLight.RED\n #rospy.logwarn( \"----------------- Taffic light is RED !!! -------------------- \" )\n self.display_predictions_scores( top_classes_prediction, detections_dict['detection_scores'] )\n else:\n #print(\"No red traffic is detected\\n\")\n self.current_light = TrafficLight.GREEN\n #rospy.logwarn( \"----------------- You're good to go !!! 
--------: {0} - {1} \".format(top_classes_prediction[0], detections_dict['detection_scores'][0]) )\n self.display_predictions_scores( top_classes_prediction, detections_dict['detection_scores'] )\n\n return self.current_light", "def __init__(self):\n self.clf = DummyClassifier(strategy='most_frequent')", "def __init__(self):\n self.label = \"Categorical & Reclass\"\n self.description = \"Create fuzzy memberships for categorical data by first reclassification to integers and then division by an appropriate value.\"\n self.canRunInBackground = False\n self.category = \"Fuzzy Logic\\\\Fuzzy Membership\"", "def build(self) -> None:", "def __existence_classification__(self,task_id,shape,aggregations):\n\n # aggregations = {}\n\n # raw_classifications and clustering_results have different hierarchy orderings- raw_classifications\n # is better for processing data and clustering_results is better for showing the end result\n # technically we only need to look at the data from clustering_results right now but its\n # hierarchy is really inefficient so use raw_classifications to help\n\n # each shape is done independently\n\n # set - so if multiple tools create the same shape - we only do that shape once\n # for shape in set(marking_tasks[task_id]):\n\n\n # pretentious name but basically whether each person who has seen a subject thinks it is a true\n # positive or not\n existence_classification = {\"param\":\"subject_id\"}\n\n global_cluster_index = 0\n # clusters_per_subject = []\n\n # look at the individual points in the cluster\n for subject_id in aggregations.keys():\n if subject_id == \"param\":\n continue\n\n # gold standard pts may not match up perfectly with the given clusters -\n # for example, we could have a gold penguin at 10,10 but the users' cluster\n # is centered at 10.1,9.8 - same penguin though\n # so as we go through the clusters, we need to see which ones match up more closely\n # with the gold standard\n # if subject_id in gold_standard_clustering[0]:\n # # closest cluster and distance\n # gold_to_cluster = {pt:(None,float(\"inf\")) for pt in gold_standard_clustering[0][subject_id]}\n # else:\n # gold_to_cluster = None\n\n\n # clusters_per_subject.append([])\n\n # # in either case probably an empty image\n # if subject_id not in clustering_results:\n # continue\n # if task_id not in clustering_results[subject_id]:\n # continue\n\n if (shape+ \" clusters\") not in aggregations[subject_id][task_id]:\n # if none of the relevant markings were made on this subject, skip it\n continue\n\n all_users = aggregations[subject_id][task_id][shape+ \" clusters\"][\"all_users\"]\n\n for local_cluster_index in aggregations[subject_id][task_id][shape+ \" clusters\"]:\n if local_cluster_index == \"all_users\":\n continue\n\n # extract the users who marked this cluster\n cluster = aggregations[subject_id][task_id][shape+ \" clusters\"][local_cluster_index]\n\n # todo - put this back when we support gold standard clustering\n # # is this user cluster close to any gold standard pt?\n # if subject_id in gold_standard_clustering[0]:\n # x,y = cluster[\"center\"]\n # for (gold_x,gold_y) in gold_to_cluster:\n # dist = math.sqrt((x-gold_x)**2+(y-gold_y)**2)\n # if dist < gold_to_cluster[(gold_x,gold_y)][1]:\n # gold_to_cluster[(gold_x,gold_y)] = local_cluster_index,dist\n #\n # # now repeat for negative gold standards\n # if subject_id in gold_standard_clustering[1]:\n # x,y = cluster[\"center\"]\n # min_dist = float(\"inf\")\n # closest= None\n # for x2,y2 in gold_standard_clustering[1][subject_id]:\n 
# dist = math.sqrt((x-x2)**2+(y-y2)**2)\n # if dist < min_dist:\n # min_dist = min(dist,min_dist)\n # closest = (x2,y2)\n # if min_dist == 0.:\n # assert (x,y) == closest\n # mapped_gold_standard[(subject_id,local_cluster_index)] = 0\n\n users = cluster[\"users\"]\n\n ballots = []\n\n # todo - the 15 hard coded value - might want to change that at some point\n for u in all_users:\n if u in users:\n ballots.append((u,1))\n else:\n ballots.append((u,0))\n\n existence_classification[(subject_id,local_cluster_index)] = ballots\n # clusters_per_subject[-1].append(global_cluster_index)\n # global_cluster_index += 1\n\n # # note we don't care about why a cluster corresponds to a gold standard pt - that is\n # # it could be really close to given gold standards - the point is that it is close\n # # to at least one of them\n # if gold_to_cluster is not None:\n # for (local_cluster_index,dist) in gold_to_cluster.values():\n # # arbitrary threshold but seems reasonable\n # if dist < 1:\n # mapped_gold_standard[(subject_id,local_cluster_index)] = 1\n\n existence_results = self.__task_aggregation__(existence_classification,task_id,{})#,mapped_gold_standard)\n assert isinstance(existence_results,dict)\n\n for subject_id,cluster_index in existence_results:\n new_results = existence_results[(subject_id,cluster_index)][task_id]\n # new_agg = {subject_id: {task_id: {shape + \" clusters\": {cluster_index: {\"existence\": new_results}}}}}\n # aggregations = self.__merge_results__(aggregations,new_agg)\n aggregations[subject_id][task_id][shape + \" clusters\"][cluster_index][\"existence\"] = new_results\n # if subject_id not in aggregations:\n # aggregations[subject_id] = {}\n # if task_id not in aggregations[subject_id]:\n # aggregations[subject_id][task_id] = {}\n # if (shape + \" clusters\") not in aggregations[subject_id][task_id]:\n # aggregations[subject_id][task_id][shape+ \" clusters\"] = {}\n # # this part is probably redundant\n # if cluster_index not in aggregations[subject_id][task_id][shape+ \" clusters\"]:\n # aggregations[subject_id][task_id][shape+ \" clusters\"][cluster_index] = {}\n #\n # aggregations[subject_id][task_id][shape+ \" clusters\"][cluster_index][\"existence\"] = existence_results[(subject_id,cluster_index)]\n\n return aggregations", "def generateTaskName(self):\n brokenComponent = ['head','hand','leg','body','hand','leg']\n for component in brokenComponent:\n self.enqueue(Task(component))", "def build_model(self):\n cfg = self.cfg\n\n print('Building model')\n self.model = SimpleNet(cfg, cfg.MODEL, 0, **cfg.MODEL.BACKBONE.PARAMS)\n self.model.to(self.device)\n print('# params: {:,}'.format(count_num_param(self.model)))\n self.optim = build_optimizer(self.model, cfg.OPTIM)\n self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)\n self.register_model('model', self.model, self.optim, self.sched)\n\n fdim = self.model.fdim\n self.classifier = nn.Linear(fdim, self.num_classes)\n print('# params: {:,}'.format(count_num_param(self.classifier)))\n self.classifier.to(self.device)\n self.optim_classifier = build_optimizer(self.classifier, cfg.OPTIM)\n self.sched_classifier = build_lr_scheduler(self.optim_classifier, cfg.OPTIM)\n self.register_model('classifier', self.classifier, self.optim_classifier, self.sched_classifier)", "def make_mdn_heads(self, config):\n raise NotImplementedError", "def create_model(self):\n self.classifier = DecisionTreeClassifier(max_depth=1)", "def class2onehot(class_labels, seq_len, batchsize, num_task):\n\n\n one_hot = 
torch.FloatTensor(batchsize,seq_len,num_task)\n one_hot.zero_()\n one_hot = one_hot.scatter_(1, seq_len,class_labels, 1)\n\n return one_hot", "def __init__(\n self,\n training_data: Dict[int, Dict[str, Union[List[Configuration], np.ndarray]]],\n **kwargs\n ):\n\n if kwargs.get('instance_features') is not None:\n raise NotImplementedError()\n super().__init__(**kwargs)\n self.training_data = training_data\n\n self.categorical_mask = np.array(self.types) > 0\n self.n_categories = np.sum(self.types)\n\n torch.manual_seed(self.seed)\n self.rng = np.random.RandomState(self.seed)\n\n X_train = []\n y_train = []\n for task in training_data:\n Y = training_data[task]['y']\n y_scaled = copula_transform(Y)\n configs = training_data[task]['configurations']\n X = convert_configurations_to_array(configs)\n for x, y in zip(X, y_scaled):\n X_train.append(x)\n y_train.append(y)\n X_train = np.array(X_train)\n X_train = self._preprocess(X_train)\n y_train = np.array(y_train)\n\n class NLLHLoss(nn.Module):\n\n def forward(self, input, target):\n # Assuming network outputs var\n std = torch.log(1 + torch.exp(input[:, 1])) + 10e-12\n mu = input[:, 0].view(-1, 1)\n\n # Pytorch Normal indeed takes the standard deviation as argument\n n = torch.distributions.normal.Normal(mu, std)\n loss = n.log_prob(target)\n return -torch.mean(loss)\n\n # TODO we could add embeddings for categorical hyperparameters here to improve performance?\n model = torch.nn.Sequential(\n torch.nn.Linear(X_train.shape[1], 50).float(),\n torch.nn.Dropout(0.1),\n torch.nn.ReLU(),\n torch.nn.Linear(50, 50).float(),\n torch.nn.Dropout(0.1),\n torch.nn.ReLU(),\n torch.nn.Linear(50, 50).float(),\n torch.nn.Dropout(0.1),\n torch.nn.ReLU(),\n torch.nn.Linear(50, 2).float(),\n )\n loss_fn = NLLHLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer,\n step_size=1000,\n gamma=0.2)\n for iter in range(3000):\n\n batch = self.rng.choice(len(X_train), size=64)\n x_batch = torch.tensor(X_train[batch]).float()\n y_batch = torch.tensor(y_train[batch]).float()\n\n y_pred = model(x_batch)\n\n # Compute and print loss.\n loss = loss_fn(y_pred, y_batch)\n if iter % 100 == 99:\n print(iter, loss.item())\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n scheduler.step()\n self.prior = model", "def fast_rcnn_head_generator(params):\n head_params = params.frcnn_head\n return fast_rcnn_head.FastrcnnHead(\n params.architecture.num_classes,\n params.architecture.num_attributes,\n head_params.num_convs,\n head_params.num_filters,\n head_params.use_separable_conv,\n head_params.num_fcs,\n head_params.fc_dims,\n params.batch_norm_activation.activation,\n head_params.use_batch_norm,\n batch_norm_activation=batch_norm_activation_generator(\n params.batch_norm_activation))", "def initialize_from_task(MODE=\"reviews\"):\n labels = []\n task_name = \"\"\n dataset_uuid = \"\"\n\n if MODE == \"reviews\":\n labels = {0: \"ASPECT\", 1:\"OPINION\"} # TODO: should be erased before deploy\n task_name = \"Restaurant review aspect/opinion extraction: Aspect or Opinion\"\n dataset_uuid = \"reviews\"\n elif MODE == \"hotel\":\n labels = {0: \"ASPECT\", 1:\"OPINION\"} # TODO: should be erased before deploy\n task_name = \"Hotel review aspect/opinion extraction: Aspect or Opinion\"\n dataset_uuid = \"hotel\"\n elif MODE == \"bc5cdr\":\n labels = {0: \"CHEMICAL\", 1:\"DISEASE\"} # TODO: should be erased before deploy\n task_name = \"Bio-med chemical/disease extraction: Chemical or Disease\"\n 
dataset_uuid = \"bc5cdr\"\n elif MODE == \"bc5cdr_example\":\n labels = {0: \"CHEMICAL\", 1:\"DISEASE\"} # TODO: should be erased before deploy\n task_name = \"Bio-med chemical/disease extraction: Chemical or Disease\"\n dataset_uuid = \"bc5cdr_example\"\n else:\n raise Error('MODE={} is not recognized.'.format(MODE))\n\n project = Project(name=task_name, dataset_uuid=dataset_uuid, labels=labels)\n #project.launch()\n return project", "def __init__(self, head):\n self.head = head", "def __init__(self, head):\n self.head = head", "def __init__(self, head):\n self.head = head", "def __init__(self, head):\n self.head = head", "def build_maskiou_head(cfg):\n name = cfg.MODEL.ROI_MASKIOU_HEAD.NAME\n return ROI_MASKIOU_HEAD_REGISTRY.get(name)(cfg)", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def __tool_classification__(self,task_id,shape,aggregations):\n\n print \"tool classification - more than one tool could create \" +str(shape) + \"s in task \" + str(task_id)\n\n if aggregations == {}:\n print \"warning - empty classifications\"\n return {}\n\n # only go through the \"uncertain\" shapes\n tool_classifications = {}\n\n for subject_id in aggregations:\n # look at the individual points in the cluster\n\n for cluster_index,cluster in aggregations[subject_id][task_id][shape+ \" clusters\"].items():\n # all_users just gives us a list of all of the users who have seen this subject\n # not relevant here\n if cluster_index == \"all_users\":\n continue\n\n # which users marked this cluster\n users = cluster[\"users\"]\n # which tool each individual user used\n tools = cluster[\"tools\"]\n assert len(tools) == len(users)\n\n # in this case, we want to \"vote\" on the tools\n ballots = zip(users,tools)\n\n tool_classifications[(subject_id,cluster_index)] = ballots\n\n # classify\n print \"tool results classification\"\n tool_results = self.__task_aggregation__(tool_classifications,task_id,{})\n assert isinstance(tool_results,dict)\n\n for subject_id,cluster_index in tool_results:\n\n new_results = tool_results[(subject_id,cluster_index)][task_id]\n # the clustering results already exist so we are just adding more data to it\n aggregations[subject_id][task_id][shape + \" clusters\"][cluster_index][\"tool_classification\"] = new_results\n\n return aggregations", "def build(self, hp, inputs=None):\n input_node = inputs\n # TODO: modify default hash_size, current version is wrong when category of a feature is more than 10000\n hash_size = self.hash_size or [hp.Choice('hash_size', [10000], default=10000)\n for _ in range(self.num_of_fields)]\n embedding_dim = self.embedding_dim or hp.Choice('embedding_dim', [8, 16], default=8)\n output_node = tf.stack(\n [\n tf.keras.layers.Embedding(hash_size[col_id], embedding_dim)(input_node[0][:, col_id])\n for col_id in range(self.num_of_fields)\n ],\n axis=1\n )\n return output_node", "def __init__(self, split, task, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,\n unpack_data=True, deterministic=True, fp16=False, save_interval=5, already_trained_on=None, 
use_progress=True,\n identifier=default_plans_identifier, extension='multi_head', tasks_list_with_char=None, trainer_class_name=None):\n # -- Initialize using parent class -- #\n super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16)\n\n # -- Set the provided split -- #\n self.split = split\n\n # -- Set the name of the head which is referred to as a task name -- #\n self.task = task\n\n # -- Set identifier to use for building the .json file that is used for restoring states -- #\n self.identifier = identifier\n\n # -- Store the fold for tracking and saving in the self.already_trained_on file -- #\n self.fold = fold\n\n # -- Initialize or set self.already_trained_on dictionary to keep track of the trained tasks so far for restoring -- #\n if already_trained_on is not None:\n self.already_trained_on = already_trained_on # Use provided already_trained on\n # -- If the current fold does not exists initialize it -- #\n if self.already_trained_on.get(str(self.fold), None) is None:\n self.already_trained_on[str(self.fold)] = {'finished_training_on': list(), 'start_training_on': None, 'finished_validation_on': list(),\n 'used_identifier': self.identifier, 'prev_trainer': ['None'], 'val_metrics_should_exist': False,\n 'checkpoint_should_exist' : False, 'tasks_at_time_of_checkpoint': list(),\n 'active_task_at_time_of_checkpoint': None} # Add current fold as new entry\n else: # It exists, then check if everything is in it\n pass\n else:\n self.already_trained_on = {str(self.fold): {'finished_training_on': list(), 'start_training_on': None, 'finished_validation_on': list(),\n 'used_identifier': self.identifier, 'prev_trainer': ['None'], 'val_metrics_should_exist': False,\n 'checkpoint_should_exist' : False, 'tasks_at_time_of_checkpoint': list(),\n 'active_task_at_time_of_checkpoint': None}}\n \n # -- Set the path were the trained_on file will be stored: grand parent directory from output_folder, ie. 
were all tasks are stored -- #\n self.trained_on_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(self.output_folder))))\n \n # -- Set save_every, so the super trainer class creates checkpoint individually and the validation metrics will be filtered accordingly -- #\n self.save_every = save_interval\n\n # -- Extract network_name that might come in handy at a later stage -- #\n # -- For more details on how self.output_folder is built look at get_default_configuration -- #\n help_path = os.path.normpath(self.output_folder) # Normalize path in order to avoid errors\n help_path = help_path.split(os.sep) # Split the path using '\\' seperator\n self.network_name = help_path[-5] # 5th element from back is the name of the used network\n\n # -- Set trainer_class_name -- #\n self.trainer_class_name = self.__class__.__name__ #trainer_class_name\n\n # -- Set the extension for output file -- #\n self.extension = extension\n\n # -- Ensure that it is a tuple and that the first element is a list and second element a string -- #\n assert isinstance(tasks_list_with_char, tuple) and isinstance(tasks_list_with_char[0], list) and isinstance(tasks_list_with_char[1], str),\\\n \"tasks_list_with_char should be a tuple consisting of a list of tasks as the first and a string \"+\\\n \"representing the character that is used to join the tasks as the second element..\"\n \n # -- Store the tuple consisting of a list with tasks and the character that should be used to join the tasks -- #\n self.tasks_list_with_char = tasks_list_with_char\n \n # -- Set tasks_joined_name for validation dataset building -- #\n self.tasks_joined_name = join_texts_with_char(self.tasks_list_with_char[0], self.tasks_list_with_char[1])\n \n # -- Define a dictionary for the metrics for validation after every nth epoch -- #\n self.validation_results = dict()\n\n # -- If -c is used, the self.validation_results need to be restored as well -- #\n # -- Check if the val_metrics should exist -- #\n if self.already_trained_on[str(self.fold)]['val_metrics_should_exist']:\n try:\n # -- Try to load the file -- #\n self.validation_results = load_json(join(self.output_folder, 'val_metrics.json'))\n except: # File does not exist\n assert False, \"The val_metrics.json file could not be loaded although it is expected to exist given the current state of the model.\"\n\n # -- Set use_prograss_bar if desired so a progress will be shown in the terminal -- #\n self.use_progress_bar = use_progress\n\n # -- Define the empty Multi Head Network which might be used before intialization, so there is no error thrown (rehearsal) -- #\n self.mh_network = None", "def __init__(self, set, task, root_dir, manifest_path, transform=None, return_pid = False):\n\n self.transform = transform\n self.set = set\n self.root_dir = root_dir\n self.manifest_path = manifest_path\n self.task = task\n self.image_dir = os.path.join(root_dir, 'data', 'imgs')\n self.return_pid = return_pid\n # should be ./nephro + data/imgs\n\n self.task_manifest = pd.read_csv(manifest_path)\n\n if self.set == \"train\":\n self.task_manifest = self.task_manifest[self.task_manifest['set'] == 'train']\n # print(self.task_manifest.shape)\n elif self.set == \"valid\":\n self.task_manifest = self.task_manifest[self.task_manifest['set'] == 'valid']\n # print(self.task_manifest.shape)\n elif self.set == 'all_images':\n print('MAKE SURE YOU ARE USING THE RIGHT MANIFEST FOR THIS!!!')\n self.task_manifest = self.task_manifest\n else:\n print('Set needs to be one of train, valid')\n\n # 
print(self.set)", "def __init__(self, dense_weight=1.0, cls_weight = 1.0, mixup_active=True, smoothing=0.1,\n classes = 1000):\n super(RelabelPooledCrossEntropy, self).__init__()\n\n\n self.CE = SoftTargetCrossEntropy()\n\n self.dense_weight = dense_weight\n self.smoothing = smoothing\n self.mixup_active = mixup_active\n self.classes = classes\n self.cls_weight = cls_weight\n assert dense_weight+cls_weight>0" ]
[ "0.6749117", "0.60415906", "0.60092235", "0.5853955", "0.5802875", "0.5693355", "0.5665179", "0.5637631", "0.5628371", "0.5621177", "0.56188375", "0.56188375", "0.56188375", "0.55739766", "0.5559742", "0.55255944", "0.5508331", "0.5490201", "0.5489764", "0.5484875", "0.5467933", "0.546658", "0.54648405", "0.5457147", "0.5408707", "0.54052955", "0.5382675", "0.53710043", "0.53617156", "0.5356519", "0.53242284", "0.5305026", "0.52942", "0.5268192", "0.52664655", "0.5258761", "0.5250395", "0.52450466", "0.524363", "0.5239285", "0.5237164", "0.5237164", "0.5236068", "0.52353674", "0.5225038", "0.52198994", "0.5219537", "0.5215727", "0.52149665", "0.52130765", "0.5205655", "0.5205535", "0.52013046", "0.5198134", "0.5195758", "0.51877606", "0.5185739", "0.51843375", "0.5182101", "0.5177792", "0.5177377", "0.5176603", "0.5176103", "0.51731145", "0.5158308", "0.5155798", "0.5155555", "0.5151799", "0.5147515", "0.5144822", "0.51380384", "0.51368296", "0.5134447", "0.51251465", "0.5124148", "0.51209766", "0.5118543", "0.51160973", "0.51128983", "0.5102737", "0.5099923", "0.5075668", "0.5075668", "0.5075668", "0.5075668", "0.5074423", "0.50649464", "0.50649464", "0.50649464", "0.50649464", "0.50649464", "0.50649464", "0.50649464", "0.50649464", "0.50649464", "0.5062886", "0.5061615", "0.5056029", "0.5055978", "0.50495636" ]
0.6520812
1
Evaluate a model on a dataset
def eval_model(
        self,
        model: nn.Module,
        batch_size: int = 32,
        data: Union[str, th.utils.data.Dataset] = "test",
        collate_fn: Optional[Callable] = None,
        by_example: bool = False,
        label_map: Optional[Callable] = None,
        nll: bool = False,
    ):
        # Set model to test mode
        mode = model.training
        model.train(mode=False)

        # Select dataset for evaluation
        dataset = data
        if isinstance(data, str):
            dataset = self.get_split(data)
        elif not isinstance(dataset, th.utils.data.Dataset):
            raise ValueError(
                "`data` must be a pytorch dataset or one of 'dev'/'valid'"
                f"/'test'/'train', got {dataset.__class__.__name__} instead"
            )
        # Dataloader
        data_loader = DataLoader(
            dataset,
            batch_size=batch_size,
            collate_fn=self.collate_fn if collate_fn is None else collate_fn,
        )
        y, y_hat, all_nlls = [], [], []
        for batch in data_loader:
            # Get model predictions
            with th.no_grad():
                nlls, _, predicted = self.nll(
                    model,
                    batch,
                    reduction="none",
                    predict=True,
                )
            # Track predictions and reference
            y.append(batch[-1])
            y_hat.append(predicted)
            all_nlls.append(nlls)
        # Concatenate
        y = th.cat(y, dim=0).cpu()
        y_hat = th.cat(y_hat, dim=0).cpu()
        all_nlls = th.cat(all_nlls, dim=0).cpu()

        # Map predictions to labels (this is useful for single
        # head model evaluated on multiple tasks)
        if label_map:
            y_hat = th.tensor([label_map(y_hat_i.item()) for y_hat_i in y_hat])
        # Task specific score
        if by_example:
            score = (y == y_hat).float()
        else:
            score = self.score(y_hat, y)
        # Reset model to the original mode
        model.train(mode=mode)

        result = score
        if nll:
            result = (score, all_nlls)

        return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate(self, dataset):\n return self.model.evaluate(dataset.X_val, dataset.y_val)", "def evaluate(self, dataset):\n\t\tpass", "def evaluate(X_test, y_test):\n # batch size is 16 for evaluation\n batch_size = 16\n\n # Load Model\n model = load_model('model/model.h5')\n return model.evaluate(X_test, y_test, batch_size, verbose = 1)", "def _evaluate(model):\n _recompile(model)\n if isinstance(eval_dataset, tuple):\n eval_images, eval_labels = eval_dataset\n return model.evaluate(\n eval_images, eval_labels, verbose=verbose, return_dict=True)\n else:\n return model.evaluate(eval_dataset, verbose=verbose, return_dict=True)", "def evaluate(model, eval_data, num_labels): \n # Turn on the evaluation state to ignore dropouts\n model.eval()\n results = [predict(model, x) for x, y in eval_data]\n f1_score, accuracy = get_metrics(np.array([y for x, y in eval_data]), results, num_labels)\n return f1_score, accuracy", "def evaluate(args, dev_dataset, model):\n\n if args.dynamic_batching:\n dev_sampler = CustomBatchSampler(dev_dataset, args.dev_batch_size)\n dev_dataloader = DataLoader(\n dev_dataset,\n batch_sampler=dev_sampler,\n num_workers=0,\n collate_fn=dynamic_padding_collate_fn\n )\n else:\n dev_sampler = SequentialSampler(dev_dataset)\n dev_dataloader = DataLoader(dev_dataset, sampler=dev_sampler,\n batch_size=args.dev_batch_size, num_workers=0)\n\n model.eval()\n loss_fn = nn.CrossEntropyLoss(ignore_index=0)\n iterator = tqdm(dev_dataloader, desc=\"Evaluation\", smoothing=0.05)\n loss_cum = None\n num_batch = 0\n for step, batch_cpu in enumerate(iterator):\n num_batch += 1\n\n batch = tuple(t.to(args.device) for t in batch_cpu)\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"token_type_ids\": batch[2],\n }\n\n with torch.no_grad():\n outputs = model(**inputs)\n\n # Calculate loss of just the question part\n q_mask = (inputs['token_type_ids'] == 2)\n masked_labels = inputs['input_ids'].masked_fill(~q_mask, 0)\n shift_labels = masked_labels[..., 1:].contiguous()\n\n lm_logits = outputs[0]\n shift_logits = lm_logits[..., : -1, :].contiguous()\n loss = loss_fn(shift_logits.view(-1, shift_logits.size(-1)),\n shift_labels.view(-1))\n\n if loss_cum is None:\n loss_cum = loss\n else:\n loss_cum += loss\n\n model.train()\n\n return loss_cum.item() / num_batch", "def _evaluate_model(\n run_id: str, dataset_filename: str, dataset_sampling_column: str = None\n):\n fix_multiprocessing_with_keras_on_macos()\n\n run = _get_run(run_id)\n hyperparameters = run.config\n\n # no need to run this on a gpu since it's 1 epoch\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\n with ModelBestH5File(run) as model_h5_filepath:\n model = _load_untrainable_model(hyperparameters, model_h5_filepath)\n\n model_name = run.config[\"model_name\"]\n x, y = _get_prepared_dataset(\n model_name, hyperparameters, dataset_filename, dataset_sampling_column\n )\n\n wandb.init(\n config={\n \"run_id\": run_id,\n \"dataset_filename\": dataset_filename,\n \"dataset_sampling_column\": dataset_sampling_column,\n },\n tags=[\"model-evaluation\"],\n )\n\n batch_size = hyperparameters[\"batch_size\"]\n label_scale_factor_mmhg = hyperparameters[\"label_scale_factor_mmhg\"]\n acceptable_error_mg_l = hyperparameters[\"acceptable_error_mg_l\"]\n acceptable_fraction_outside_error = hyperparameters[\n \"acceptable_fraction_outside_error\"\n ]\n\n # we're using fit() instead of evaluate() to get the functionality of these callbacks\n # training performance in the results should be ignored, as it can be affected by 
some\n # training-only layers such as dropout\n model.fit(\n x,\n y,\n batch_size=batch_size,\n epochs=1,\n verbose=2,\n validation_data=(x, y),\n callbacks=[\n ThresholdValMeanAbsoluteErrorOnCustomMetric(\n acceptable_fraction_outside_error=acceptable_fraction_outside_error,\n acceptable_error_mg_l=acceptable_error_mg_l,\n ),\n WandbCallback(verbose=1, monitor=\"val_adjusted_mean_absolute_error\"),\n LogPredictionsAndWeights(\n metric=\"val_adjusted_mean_absolute_error\",\n dataset=([], [], x, y),\n label_scale_factor_mmhg=label_scale_factor_mmhg,\n ),\n ],\n )\n\n # returning model and dataset for use in jupyter notebooks\n return model, (x, y)", "def evaluate(self, test_data):\n result = self.model.run(test_data)\n self._save_result(result)", "def evaluate(data_loader, model, device):\n\n\tmodel.eval()\n\ttotal_num_examples = 0\n\ttotal_error = 0\n\tfor idx, batch in enumerate(data_loader):\n\t\tquestion_feature_vec = batch['feature_vec'].to(device)\n\t\tquestion_len = batch['len'].to(device)\n\t\tlabels = batch['labels'].to(device)\n\n\t\t####Your code here ---\n\n\t\t# get the output from the model\n\t\tlogits = model(question_feature_vec, question_len)\n\n\t\t# get error, num_examples using accuracy_fn defined previously\n\t\terror, num_examples = accuracy_fn(logits, labels)\n\n\t\t# update total_error and total_num_examples\n\t\ttotal_error += error\n\t\ttotal_num_examples += num_examples\n\n\taccuracy = 1 - total_error / total_num_examples\n\treturn accuracy", "def evaluate_model(model: torch.nn.Module, dataloader: torch.utils.data.DataLoader, device: torch.device):\n # Define a loss (mse loss)\n mse = torch.nn.MSELoss()\n # We will accumulate the mean loss in variable `loss`\n loss = torch.tensor(0., device=device)\n with torch.no_grad(): # We do not need gradients for evaluation\n # Loop over all samples in `dataloader`\n for data in tqdm.tqdm(dataloader, desc=\"scoring\", position=0):\n inputs, means, stds, sample_ids, targets = data\n # inputs = inputs.to(device)\n # targets = targets.to(device)\n\n # Get outputs for network\n outputs = model(inputs)\n # de-normalize\n for sample in range(outputs.shape[0]):\n outputs[sample, 0, :, :] *= stds[sample]\n outputs[sample, 0, :, :] += means[sample]\n\n # outputs = get_predictions_for_evaluation(inputs, outputs, targets)\n predictions_tensor, inputs_plus_predictions, targets_displayed, predictions_displayed = process_outputs_training(\n inputs, outputs,\n targets)\n # Calculate mean mse loss over all samples in dataloader (accumulate mean losses in `loss`)\n loss += (torch.stack([mse(output, target) for output, target in zip(predictions_tensor, targets)]).sum()\n / len(dataloader.dataset))\n return loss", "def evaluate_model(sess, model, data_set):\n total_cost = 0.0\n total_r_cost = 0.0\n total_kl_cost = 0.0\n for batch in range(data_set.num_batches):\n unused_orig_x, x, s = data_set.get_batch(batch)\n feed = {model.input_data: x, model.sequence_lengths: s}\n (cost, r_cost,\n kl_cost) = sess.run([model.cost, model.r_cost, model.kl_cost], feed)\n total_cost += cost\n total_r_cost += r_cost\n total_kl_cost += kl_cost\n\n total_cost /= (data_set.num_batches)\n total_r_cost /= (data_set.num_batches)\n total_kl_cost /= (data_set.num_batches)\n return (total_cost, total_r_cost, total_kl_cost)", "def evaluate(model, g, val_nid, device):\n model.eval()\n nfeat = g.ndata['features']\n labels = g.ndata['labels']\n with th.no_grad():\n pred = model.module.inference(g, nfeat, device, args.batch_size, args.num_workers)\n model.train()\n test_acc 
= Accuracy()\n return test_acc(th.softmax(pred[val_nid], -1), labels[val_nid].to(pred.device))", "def evaluate_model(model: torch.nn.Module, dataloader: torch.utils.data.DataLoader, device: torch.device):\n # Define a loss (mse loss)\n mse = torch.nn.MSELoss()\n # We will accumulate the mean loss in variable `loss`\n loss = torch.tensor(0., device=device)\n with torch.no_grad(): # We do not need gradients for evaluation\n # Loop over all samples in `dataloader`\n for data in tqdm.tqdm(dataloader, desc=\"scoring\", position=0):\n # Get a sample and move inputs and targets to device\n inputs, targets = data\n inputs = inputs.to(device)\n targets = targets.to(device)\n\n # Get outputs for network\n outputs, flat = model(inputs)\n flat = flat.to(device)\n\n # Here we could clamp the outputs to the minimum and maximum values of inputs for better performance\n\n # Calculate mean mse loss over all samples in dataloader (accumulate mean losses in `loss`)\n loss += (torch.stack([mse(output, target) for output, target in zip(flat, targets)]).sum()\n / len(dataloader.dataset))\n return loss * 65025", "def run(self, data, training=False):\n # Set mode\n if training:\n self._model.train()\n else:\n self._model.eval()\n # Compute\n return self._model(data)", "def evaluate(self,\n model,\n x=None,\n y=None,\n batch_size=None,\n verbose=1,\n sample_weight=None,\n steps=None,\n callbacks=None,\n **kwargs):\n raise NotImplementedError()", "def evaluate(self):\n\n\t\t## We should be evaluating on dev dataset as well, so commenting x_test\n\t\t#self.model_score = self.model.evaluate(self.x_test, self.y_test_oh, batch_size=2048)\n\t\tself.model_score = self.model.evaluate(self.x_dev, self.y_dev_oh, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\n\t\t##Saving atucal vs predicted predictions\n\t\t##np.argmax returns the index where it see's 1 in the row\n\t\t#y_pred = np.argmax(self.model.predict(self.x_test, batch_size=2048), axis=1)\n\t\ty_pred = np.argmax(self.model.predict(self.x_dev, batch_size=2048), axis=1)\n\n\t\t## vstack will stack them in 2 rows, so we use Trasnpose to get them in column stack\n\t\t#output_predict = np.vstack((np.argmax(self.y_test_oh, axis=1), y_pred)).T\n\t\toutput_predict = np.vstack((np.argmax(self.y_dev_oh, axis=1), y_pred)).T\n\t\toutputFile = self.resultDir + \"/outputPredict.csv\" \n\t\tnp.savetxt(outputFile, output_predict, fmt=\"%5.0f\", delimiter=\",\")\n\n\t\t##Error Analysis of the prediction\n\t\terrorAnalysis(outputFile)\n\n\t\treturn self.model_score", "def eval(self, dataset=None, criterion=None):\n # Recover the defaults, if missing\n dataset, criterion = self._resolve_defaults(testset=dataset, criterion=criterion)\n # Sample the test batch\n inputs, targets = dataset.sample(self._config)\n # Compute and return the evaluation result\n return criterion(self.run(inputs), targets)", "def evaluate(self, data: dataset.Dataset, batch_size: int = 32) -> Any:\n ds = data.gen_tf_dataset(\n batch_size, is_training=False, preprocess=self._preprocess)\n return self._model.evaluate(ds)", "def test(self, dataset):\n test_accuracy = 0\n test_loss = 0\n num_examples_tested = 0\n # Put model into evaluation mode\n self.model.eval()\n for num, batch in enumerate(dataset.loader):\n xs, ys = batch\n batch_size = len(xs)\n num_examples_tested += batch_size\n iloss, iaccuracy = self.model(xs, ys)\n test_loss += iloss.cpu().data.numpy().item() * batch_size\n test_accuracy += iaccuracy.cpu().data.numpy().item() * batch_size\n test_accuracy = 
test_accuracy / num_examples_tested\n test_loss = test_loss / num_examples_tested\n # Return accuracy and loss for this model on the test set\n return test_accuracy, test_loss", "def evaluate_model(model, model_name, X_train, Y_train, X_test, ground_truth):\n\tprint(\"\t\tModel [\" + model_name + \"]\")\n\tmodel.fit(X_train, Y_train)\n\tY_pred = model.predict(X_test).astype(int)\n\tregression = np.sqrt(metrics.mean_squared_error(ground_truth, Y_pred))\n\treturn regression", "def evaluate_model(model, X_train, y_train, X_test, y_test):\n model = model\n model.fit(X_train, y_train)\n\n y_pred = model.predict(X_test)\n\n report = classificationreport(y_test, y_pred, target_names= [\"0\", \"1\"], output_dict=True)\n\n return report", "def test_evaluate_model(sequential_model, model_data):\n _, _, _, _, x_test, y_test = model_data\n compile_model(sequential_model)\n output = evaluate_model(sequential_model, x_test, y_test, 64)\n assert len(output) == 2", "def model_evaluate(model,x_train,n_y_array,x_val, vald_array):\n\n scores = model.evaluate(x_train, n_y_array, verbose=1)\n\n scores2 = model.evaluate(x_val, vald_array, verbose=1)\n\n\n print(\"for traininf set\")\n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores[0]))\n\n\n\n print(\"for validation set : \") \n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores2[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores2[0]))", "def evaluate(\n self,\n model: nn.Module,\n dataset: Union[NumpyArrayTuple, torch.utils.data.Dataset],\n verbose: bool = True,\n ) -> dict:\n return evaluate_module(\n model,\n dataset,\n self.loss_fn,\n device=self.device,\n metrics_map=self.metrics_map,\n batch_size=self.batch_size,\n verbose=verbose,\n )", "def evaluate():\n model.eval()\n with torch.no_grad():\n loss, n = 0, 0\n for xb, yb in valid_dl:\n n += len(xb)\n loss += loss_func(model(xb), yb) * len(xb)\n\n return loss/n", "def assess_model(model, test_data, label):\n return model.score(test_data,label)", "def evaluate(dataset, model, args):\n device = None\n if torch.cuda.is_available():\n device = torch.cuda.current_device()\n\n model.to(device)\n model.train(False)\n\n test_loader = DataLoader(dataset, num_workers=4)\n average_mse = 0\n average_psnr = 0\n\n for batch in tqdm(test_loader):\n # Preprocess the data\n image, camera_pose = batch\n batch_size = len(image)\n\n image = torch.as_tensor(image, device=device)\n image = image[..., :3].permute(0, 3, 1, 2)\n camera_pose = torch.as_tensor(camera_pose, device=device)\n camera_pose = camera_pose[:, :3, :]\n\n # Run the inference\n with torch.no_grad():\n prediction = model(camera_pose)\n predicted_pixels = prediction[\"rgb_map\"]\n target_pixels = image.reshape(batch_size, 3, -1).transpose(1, 2)\n\n mse = mse_loss(predicted_pixels[-1], target_pixels)\n average_mse += mse\n\n psnr = mse_to_psnr(mse)\n average_psnr += psnr\n\n average_mse /= len(dataset)\n average_psnr /= len(dataset)\n\n output = {\n \"average_mse\": float(average_mse),\n \"average_psnr\": float(average_psnr),\n }\n print(output)\n\n with open(args.output_file, \"w\") as json_file:\n json.dump(output, json_file)\n print(\"Saved results to:\", args.output_file)", "def evaluate_model(model_name, y_true, y_pred):\n\n # Calculate performance metrics\n rmse_eval = evaluate_rmse(y_true, y_pred)\n mae_eval = evaluate_mae(y_true, y_pred) \n r2_eval = evaluate_r2(y_true, y_pred)\n\n # Print results\n print_evaluation(model_name, mae_eval, rmse_eval, 
r2_eval)", "def evaluate(dataloader, model):\n with torch.no_grad():\n model.eval()\n count = 0\n correct = 0\n total_loss = 0.0\n reg_loss = 0.0\n l2_lambda = 0.00001\n criterion = nn.BCEWithLogitsLoss()\n for images_data, target_labels in tqdm(dataloader):\n if config.use_gpu:\n images_data = images_data.cuda()\n target_labels = target_labels.cuda()\n predicted_labels = model(images_data)\n total_loss += criterion(predicted_labels, target_labels)\n count += predicted_labels.shape[0]\n preds = predicted_labels.argmax(dim=1)\n targets = target_labels.argmax(dim=1)\n correct += (torch.eq(preds, targets)).sum().item()\n \n l2_reg = torch.tensor(0.)\n if config.use_gpu:\n l2_reg = l2_reg.cuda()\n for param in model.parameters():\n l2_reg += torch.norm(param)\n reg_loss += l2_lambda * l2_reg\n\n total_loss += reg_loss\n accuracy = correct * 1.0 / count\n return accuracy, total_loss.item()", "def evaluate(self, dataset, *args, **kwargs):\n\n losses = []\n for sample in dataset:\n output = self.predict(sample, *args, **kwargs)\n losses.append(self.metric_loss(output, sample, *args, **kwargs))\n\n return losses", "def eval(self):\n self.train(mode=False)", "def evaluate(args, model, tokenizer, eval_dataset, eval_dataloader, task_name, model_type, split, step):\n model.eval()\n processor = MoralStoriesProcessor()\n results = dict()\n softmax = torch.nn.Softmax(dim=1)\n\n # Eval!\n logger.info('***** Running evaluation on the validation / test set *****')\n logger.info(' Num examples = %d', len(eval_dataset))\n logger.info(' Batch size = %d', args.eval_batch_size)\n batch_losses = list()\n eval_loss = 0.0\n micro_loss, macro_loss = 0.0, 0.0\n num_batches, num_tokens = 0, 0\n preds = None\n soft_preds = None\n out_label_ids = None\n # Perform a single evaluation step\n for batch in tqdm(eval_dataloader, desc='Evaluating', mininterval=10, ncols=100):\n batch = tuple(t.to(args.device) for t in batch)\n with torch.no_grad():\n if 'gen' not in task_name:\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'token_type_ids': batch[2] if model_type == 'bert' else None,\n 'labels': batch[3]}\n else:\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'labels': batch[3]}\n if 'gpt2' not in model_type:\n # Prepare decoder inputs and labels for enc-dec models\n inputs['labels'] = batch[3][:, 1:].contiguous() # shift\n decoder_input_ids = batch[3][:, :-1].clone() # shift\n decoder_input_ids[decoder_input_ids == -100] = tokenizer.pad_token_id # remove masking\n inputs['decoder_input_ids'] = decoder_input_ids.contiguous()\n\n outputs = model(**inputs)\n\n tmp_eval_loss, logits = outputs[:2]\n soft_logits = softmax(logits)\n eval_loss += tmp_eval_loss.mean().item()\n batch_losses.append(tmp_eval_loss.item())\n\n if 'gen' not in task_name:\n if preds is None:\n preds = logits.detach().cpu().numpy()\n soft_preds = soft_logits.detach().cpu().numpy()\n out_label_ids = inputs['labels'].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n soft_preds = np.append(soft_preds, soft_logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)\n else:\n # Obtain per-token loss for perplexity computation\n batch_loss = get_token_loss(args, logits, batch[3], batch[4], model_type=model_type)\n macro_loss += batch_loss.mean().item()\n micro_loss += batch_loss.sum().item()\n num_batches += 1\n num_tokens += batch_loss.view(-1).shape[0]\n\n # Compute and update evaluation metric values\n 
if 'gen' not in task_name:\n # Isolate model predictions\n preds = np.argmax(preds, axis=1)\n soft_preds = soft_preds.tolist()\n curr_result = compute_cls_metrics(preds, out_label_ids)\n else:\n macro_perplexity = torch.exp(torch.tensor(macro_loss / num_batches)).item()\n micro_perplexity = torch.exp(torch.tensor(micro_loss / num_tokens)).item()\n curr_result = {'macro_perplexity': macro_perplexity,\n 'micro_perplexity': micro_perplexity}\n\n if len(results.keys()) == 0:\n for k, v in curr_result.items():\n results[k] = [v]\n else:\n for k, v in curr_result.items():\n results[k].append(v)\n\n # Log metrics\n output_eval_file = os.path.join(args.output_dir, 'results_{}_{}.txt'.format(task_name, split))\n with open(output_eval_file, 'a') as writer:\n logger.info('***** Eval results *****')\n writer.write('STEP: {:s}\\n'.format(str(step)))\n for key in sorted(curr_result.keys()):\n logger.info(' %s = %s', key, str(curr_result[key]))\n writer.write('%s = %s\\n' % (key, str(curr_result[key])))\n\n # Log predictions\n if 'gen' not in task_name:\n output_pred_file = \\\n os.path.join(args.output_dir, 'predictions_{}_{}_{}.lst'.format(task_name, split, step))\n with open(output_pred_file, 'w') as writer:\n logger.info('***** Write predictions *****')\n for pred in preds:\n writer.write('{}\\n'.format(processor.get_labels()[pred]))\n\n # Maintain a single metrics file\n if os.path.exists(args.output_dir):\n with open(os.path.join(args.output_dir, 'metrics_{}_{}.json'.format(task_name, split)), 'w') as f:\n f.write(json.dumps(results))\n f.close()\n\n # Report mean dev loss\n mean_eval_loss = eval_loss / len(eval_dataloader)\n logging.info('\\n' + '*' * 10)\n logging.info('Mean development loss: {:.4f}'.format(mean_eval_loss))\n logging.info('*' * 10 + '\\n')\n\n return results, mean_eval_loss, preds, soft_preds", "def evaluate_model(model, testset):\n\n # Sort data by top level label to ease inspection\n testset = testset.sort_using_layer(-1, reverse=True)\n\n # Feed the samples to the model to obtain each layers' activations\n v = testset.get_layer(0)\n hs = model.transform(v)[1:]\n\n # Read model weights\n ws = [params['w'] for params in model.parameters]\n del params\n\n # Take the (hidden) labels from the data set\n ls = testset.get_layers()[1:]\n\n # In each layer, reorder and invert neurons to match best with the labels\n for i in range(len(ls)):\n hs[i], ws[i] = align_with_labels(ls[i], hs[i], ws[i])\n del i\n\n # Measure correlations, etcetera\n metrics = compare(ls, hs)\n\n # Simply return a dict with all used variables\n return locals()", "def evaluate_full_dataset(\n self, data_loader: torch.utils.data.DataLoader, model: nn.Module\n ) -> Dict[str, Any]:\n pass", "def evaluate(model, data):\n n_targets = 0\n n_correct_predictions = 0\n\n # Set the model on evaluatio mode.\n model.eval()\n\n # Create progress bar.\n progress_bar = tqdm.tqdm(total=len(data),\n unit='batch',\n desc='[evaluate] batch accuracy: 0.000',\n leave=False)\n\n # Loop through validation batches.\n for inputs, targets in data:\n\n # Send data to GPU if CUDA is enabled.\n if next(model.parameters()).is_cuda:\n inputs = inputs.cuda()\n targets = targets.cuda()\n\n # Feed forward.\n with torch.set_grad_enabled(False):\n outputs = model(inputs)\n\n # Choose the class with maximum probability.\n _, predictions = torch.max(outputs, 1)\n\n accuracy = (predictions == targets).sum().item() / len(targets)\n progress_bar.update(1)\n progress_bar.set_description(\n '[evaluate] batch accuracy: {accuracy:.3f}'.format(\n 
accuracy=accuracy))\n\n # Accumulate targets and correct predictions count.\n n_targets += len(targets)\n n_correct_predictions += (predictions == targets).sum().item()\n\n # Close progress bar.\n progress_bar.close()\n\n return n_correct_predictions / n_targets", "def evaluateModel(model, val_data, abs_idx2word, device, batch_size):\n #modify abs_idx2word by removing pad tokens so as to correctly calculate Reouge scores\n abs_idx2word[0] = ''\n\n #data setup\n val_data.move_to(torch.device('cpu')) #keep data on cpu\n val_dataloader = data.DataLoader(val_data, batch_size=batch_size, shuffle=True, num_workers=0)\n #model instantiation\n model = model.to(device=device)\n #evaluation\n logger.debug(f'\\tModel eval on validation data...')\n r1, r2, rl = evaluate.evaluate_model(model, val_dataloader, abs_idx2word, device, print_example=True)\n logger.debug(f'\\nRouge-1 is {r1:.4f}, Rouge-2 is {r2:.4f}, and Rouge-l is {rl:.4f}')", "def evaluate(model, val_data, epoch):\n print('validating')\n\n # 设置为评估模式 \n model.eval() \n\n val_loss = []\n with torch.no_grad():\n DEVICE = config.DEVICE\n\n val_dataloader = DataLoader(dataset=val_data,\n batch_size=config.batch_size,\n shuffle=True,\n pin_memory=True, drop_last=True,\n collate_fn=collate_fn)\n\n for batch, data in enumerate(tqdm(val_dataloader)):\n\n x, y, x_len, y_len, oov, len_oovs = data\n\n if config.is_cuda:\n x = x.to(DEVICE)\n y = y.to(DEVICE)\n x_len = x_len.to(DEVICE)\n len_oovs = len_oovs.to(DEVICE)\n\n loss = model(x, x_len, y, len_oovs, batch=batch, \n num_batches=len(val_dataloader),\n teacher_forcing=True)\n\n val_loss.append(loss.item())\n\n return np.mean(val_loss)", "def eval(self):\n\n # parameters initialize\n torch = import_optional_dependency(\"torch\")\n eval_total = 0\n eval_correct = 0\n eval_loss = 0\n self._set_eval()\n\n # display the information\n if self.info:\n print(f\"\\rEvaluating...\", end=\"\")\n\n # start eval part\n for i, (source, target) in enumerate(self.eval_dataset):\n # send data to device\n source = source.to(self.device)\n target = target.to(self.device)\n\n result = self.model(source)\n eval_loss += self.criterion(result, target).item()\n _, predicted = torch.max(result.data, 1)\n eval_total += target.size(0)\n eval_correct += (predicted == target).sum().item()\n\n accuracy = eval_correct / eval_total\n eval_loss = eval_loss / eval_total\n\n if self.info:\n print(f\"\\rEvaluation loss: { eval_loss } | Accuracy: { accuracy }\")\n\n return eval_loss, accuracy", "def evaluate(model, datagen, X_test, Y_test, batch_size, save_folder_path=None):\n\n print(\"[INFO] Evaluating model...\")\n\n scores = model.evaluate_generator(\n datagen.flow(X_test, Y_test, batch_size=batch_size),\n verbose=1)\n \n print(\"[INFO] Evaluation results:\\n{0}: {1:.2f}\\n{2}: {3:.2f}\".format(model.metrics_names[0], scores[0]*100, model.metrics_names[1], scores[1]*100))\n \n if save_folder_path is not None:\n # Write results to path\n assert os.path.isdir(save_folder_path) == True, \"Unable to save evaluation results, save_folder_path is not a folder\"\n eval_results_path = save_folder_path + \"/eval_results.txt\"\n eval_handle = open(eval_results_path, 'w')\n eval_handle.write(\"Model name: {}\\n\\n\".format(MODEL_NAME))\n eval_handle.write(\"Evaluation results:\\n{0}: {1:.2f}\\n{2}: {3:.2f}\".format(model.metrics_names[0], scores[0]*100, model.metrics_names[1], scores[1]*100))\n eval_handle.close()", "def evaluate(self):\n\n\t\tself.model_score = self.model.evaluate(self.x_test, self.y_test, batch_size=2048)\n\t\tprint(\"%s 
score = %f\\n\" %(self.modelName, self.model_score[1]))\n\t\treturn self.model_score", "def _evaluate_fn(model, dataset):\n # Reset the local variables so that the returned metrics are computed using\n # the given data. Similar to the `reset_states` method of `tf.metrics.Metric`.\n for var in model.local_variables:\n if var.initial_value is not None:\n var.assign(var.initial_value)\n else:\n var.assign(tf.zeros_like(var))\n\n def eval_fn(dummy_state, batch):\n \"\"\"Evaluates the model on a batch.\"\"\"\n model.forward_pass(batch, training=False)\n return dummy_state\n\n # Evaluate on the dataset.\n dataset.reduce(initial_state=0, reduce_func=eval_fn)\n\n # Obtain the metrics.\n results = collections.OrderedDict()\n local_outputs = model.report_local_outputs()\n for name, metric in local_outputs.items():\n if isinstance(metric, list) and (len(metric) == 2):\n # Some metrics returned by `report_local_outputs()` can have two scalars:\n # one represents `sum`, and the other represents `count`. Ideally, we want\n # to return a single scalar for each metric.\n results[name] = metric[0] / metric[1]\n else:\n results[name] = metric[0] if isinstance(metric, list) else metric\n return results", "def evaluate(data_loader, model, device):\n model.eval()\n\n loss_ = []\n with torch.no_grad():\n for idx, batch in enumerate(data_loader):\n data = batch.to(device)\n outputs = model.forward(data)\n loss_.append(F.l1_loss(outputs, data).data.numpy())\n\n return np.mean(loss_)", "def evaluate_model(self, test_data, test_labels,verbose=2):\n test_loss, test_acc = self.model.evaluate(test_data, test_labels, verbose=verbose)\n return test_loss, test_acc", "def evaluate_model(model, X_test, Y_test): \n #Make predictions with the model\n Y_pred = model.predict(X_test)\n #convert numpy output to dataframe and add columns\n Y_pred_df = pd.DataFrame(Y_pred)\n Y_pred_df.columns = Y_test.columns\n #Convert predictions and correct y values to float for faciliate comparison\n Y_pred_df = Y_pred_df.astype('float64')\n Y_test = Y_test.astype('float64')\n print_score(Y_test, Y_pred_df, 'weighted avg')", "def evaluate(self, predictor_model) -> Any:\n raise NotImplementedError()", "def evaluate(model, test_files):\n print(\"Running predictions.\")\n models = load_model(model)\n predictions = predict(models, test_files)\n\n # # write predictions to file\n # write_predictions(\"evaluate_out.json\",predictions)\n evaluate_individual(predictions, test_files, models)\n evaluate_overall(predictions)", "def finetune_and_evaluate_model(model: BertForSequenceClassification, dataset: SentenceComplexityDataset):\n if test_split_ratio:\n train_ds, test_ds = get_train_test_split(dataset, test_split_ratio)\n train_ft_ds = SentenceComplexityFinetuningDataset(train_ds)\n test_ft_ds = SentenceComplexityFinetuningDataset(test_ds)\n else:\n train_ft_ds = SentenceComplexityFinetuningDataset(dataset)\n test_ft_ds = None\n\n training_args = TrainingArguments(\"finetune_trainer\",\n evaluation_strategy=\"epoch\",\n logging_strategy=\"epoch\",\n per_device_train_batch_size=16,\n per_device_eval_batch_size=16)\n\n trainer = Trainer(model=model, args=training_args, train_dataset=train_ft_ds, eval_dataset=test_ft_ds)\n trainer.train()\n trainer.evaluate()\n\n model.save_pretrained(FINETUNED_BERT_MODEL_PATH)", "def eval_model(data, model):\n f = model.forward(data['image'])\n loss = model.loss(f,data['label'])\n\n y_predict = model.predict(f)\n\n count = 0\n for i in range(len(data['label'])):\n if data['label'][i] == y_predict[i]:\n count = count + 
1\n\n acc = (count/len(data['label']))*100\n\n return loss, acc", "def evaluate(self, output_dir, test_data, device, verbose_logging=False):\r\n tokenizer = self.tokenizer\r\n # device = torch.device(\"cuda:0\")\r\n model = self.model\r\n model.to(device)\r\n args = self.args\r\n\r\n # # reassgin unique_id for features to keep order for federated learning situation\r\n # unique_id = 1000000000\r\n # for feature in self.test_dl.features:\r\n # feature.unique_id = unique_id\r\n # unique_id += 1\r\n\r\n examples = test_data.examples\r\n features = test_data.features\r\n\r\n eval_loss = 0.0\r\n nb_eval_steps = 0\r\n model.eval()\r\n\r\n # if args.n_gpu > 1:\r\n # model = torch.nn.DataParallel(model)\r\n\r\n if self.args.fp16:\r\n from torch.cuda import amp\r\n\r\n all_results = []\r\n for batch in tqdm(test_data, disable=args.silent, desc=\"Running Evaluation\"):\r\n batch = tuple(t.to(device) for t in batch)\r\n\r\n with torch.no_grad():\r\n inputs = {\r\n \"input_ids\": batch[1],\r\n \"attention_mask\": batch[2],\r\n \"token_type_ids\": batch[3],\r\n }\r\n\r\n if self.args.model_type in [\r\n \"xlm\",\r\n \"roberta\",\r\n \"distilbert\",\r\n \"camembert\",\r\n \"electra\",\r\n \"xlmroberta\",\r\n \"bart\",\r\n ]:\r\n del inputs[\"token_type_ids\"]\r\n\r\n example_indices = batch[4]\r\n\r\n if args.model_type in [\"xlnet\", \"xlm\"]:\r\n inputs.update({\"cls_index\": batch[5], \"p_mask\": batch[6]})\r\n\r\n if self.args.fp16:\r\n with amp.autocast():\r\n outputs = model(**inputs)\r\n eval_loss += outputs[0].mean().item()\r\n else:\r\n outputs = model(**inputs)\r\n eval_loss += outputs[0].mean().item()\r\n begin_idx = len(all_results)\r\n for i, _ in enumerate(example_indices):\r\n eval_feature = features[begin_idx + i]\r\n unique_id = int(eval_feature.unique_id)\r\n if args.model_type in [\"xlnet\", \"xlm\"]:\r\n # XLNet uses a more complex post-processing procedure\r\n result = RawResultExtended(\r\n unique_id=unique_id,\r\n start_top_log_probs=to_list(outputs[0][i]),\r\n start_top_index=to_list(outputs[1][i]),\r\n end_top_log_probs=to_list(outputs[2][i]),\r\n end_top_index=to_list(outputs[3][i]),\r\n cls_logits=to_list(outputs[4][i]),\r\n )\r\n else:\r\n result = RawResult(\r\n unique_id=unique_id, start_logits=to_list(outputs[0][i]), end_logits=to_list(outputs[1][i]),\r\n )\r\n all_results.append(result)\r\n\r\n nb_eval_steps += 1\r\n\r\n eval_loss = eval_loss / nb_eval_steps\r\n\r\n prefix = \"test\"\r\n os.makedirs(output_dir, exist_ok=True)\r\n\r\n output_prediction_file = os.path.join(output_dir, \"predictions_{}.json\".format(prefix))\r\n output_nbest_file = os.path.join(output_dir, \"nbest_predictions_{}.json\".format(prefix))\r\n output_null_log_odds_file = os.path.join(output_dir, \"null_odds_{}.json\".format(prefix))\r\n\r\n if args.model_type in [\"xlnet\", \"xlm\"]:\r\n # XLNet uses a more complex post-processing procedure\r\n (all_predictions, all_nbest_json, scores_diff_json, out_eval,) = write_predictions_extended(\r\n examples,\r\n features,\r\n all_results,\r\n args.n_best_size,\r\n args.max_answer_length,\r\n output_prediction_file,\r\n output_nbest_file,\r\n output_null_log_odds_file,\r\n None,\r\n model.config.start_n_top,\r\n model.config.end_n_top,\r\n True,\r\n tokenizer,\r\n verbose_logging,\r\n )\r\n else:\r\n all_predictions, all_nbest_json, scores_diff_json = write_predictions(\r\n examples,\r\n features,\r\n all_results,\r\n args.n_best_size,\r\n args.max_answer_length,\r\n args.do_lower_case,\r\n output_prediction_file,\r\n output_nbest_file,\r\n 
output_null_log_odds_file,\r\n verbose_logging,\r\n True,\r\n args.null_score_diff_threshold,\r\n )\r\n\r\n return all_predictions, all_nbest_json, scores_diff_json, eval_loss", "def train_eval_model(model, model_name, X_train, y_train, X_test, y_test):\n\n model_predictions_train = model.predict(X_train) # Wyniki regresji dla zbioru treningowego\n model_mse_train = mean_squared_error(y_train, model_predictions_train) # MSE dla zbioru treningowego\n model_rmse_train = np.sqrt(model_mse_train) # RMSE dla zbioru treningowego\n model_predictions_test = model.predict(X_test)\n model_mse_test = mean_squared_error(y_test, model_predictions_test)\n model_rmse_test = np.sqrt(model_mse_test)\n # Kroswalidacja modelu\n model_scores = cross_val_score(model, X_train, y_train, scoring=\"neg_mean_squared_error\", cv=10)\n model_rmse_scores = np.sqrt(-model_scores)\n\n model_result = ResultDataRegressors(model_name, model, model_rmse_train, model_rmse_test, model_rmse_scores)\n return model_result", "def train_and_eval(self):\n self.__create_indexes()\n model = None\n model = None\n if self.model == 'OMult':\n model = OMult(self.kwargs)\n elif self.model == 'ConvO':\n model = ConvO(self.kwargs)\n elif self.model == 'QMult':\n model = QMult(self.kwargs)\n elif self.model == 'ConvQ':\n model = ConvQ(self.kwargs)\n elif self.model == 'OMultBatch':\n model = OMultBatch(self.kwargs)\n elif self.model == 'ConvOBatch':\n model = ConvOBatch(self.kwargs)\n elif self.model == 'QMultBatch':\n model = QMultBatch(self.kwargs)\n elif self.model == 'ConvQBatch':\n model = ConvQBatch(self.kwargs)\n else:\n print(self.model, ' is not valid name')\n raise ValueError\n\n self.train(model)\n self.eval(model)", "def evaluate():\n\tmodel.eval()\n\tstddev = 1 # And mean=0\n\tfor batch_idx, (data, _) in enumerate(syn_test_loader):\n\t\tdata = data.cuda()\n\t\tif batch_idx == 0:\n\t\t\tnoise = torch.autograd.Variable(torch.randn(batch_size, bottleneck).cuda() * stddev)\n\t\t\tsample_representation(\"orig_nat\", data, noise)\n\t\t\tsample_representation(\"natural\", data, noise)\n\t\t\tsample_representation(\"orig_syn\", data, noise)\n\t\t\tsample_representation(\"synth\", data, noise)", "def evaluate(model: torch.nn.Module, dataloader: torch.utils.data.DataLoader, show_progress: bool = True,\n device: torch.device = torch.device('cuda:0')):\n with torch.no_grad():\n model.to(device=device)\n sum_cross_entropy = torch.nn.BCEWithLogitsLoss(reduction='sum').to(device=device)\n scoring_loss = 0.\n scoring_predictions = []\n scoring_labels = []\n for scoring_data in tqdm(dataloader, total=len(dataloader), desc=\"Evaluating model\",\n disable=not show_progress, position=1):\n \n # Get samples as lists\n labels, inputs, sequence_lengths, counts_per_sequence, sample_ids = scoring_data\n \n # Apply attention-based sequence reduction and create minibatch\n labels, inputs, sequence_lengths, n_sequences = model.reduce_and_stack_minibatch(\n labels, inputs, sequence_lengths, counts_per_sequence)\n \n # Compute predictions from reduced sequences\n logit_outputs = model(inputs, n_sequences)\n prediction = torch.sigmoid(logit_outputs)\n \n # Compute mean of losses on-the-fly\n scoring_loss += sum_cross_entropy(logit_outputs, labels[..., -1]) / len(dataloader.dataset)\n \n # Store predictions and labels\n scoring_predictions.append(prediction)\n scoring_labels.append(labels[..., -1])\n \n # Compute BACC, F1, and AUC score\n scoring_predictions = torch.cat(scoring_predictions, dim=0).float()\n scoring_predictions_threshold = (scoring_predictions 
> 0.5).float()\n scoring_labels = torch.cat(scoring_labels).float()\n \n scoring_labels = scoring_labels.cpu().numpy()\n scoring_predictions = scoring_predictions.cpu().numpy()\n scoring_predictions_threshold = scoring_predictions_threshold.cpu().numpy()\n \n roc_auc = metrics.roc_auc_score(scoring_labels, scoring_predictions, average=None)\n bacc = metrics.balanced_accuracy_score(y_true=scoring_labels, y_pred=scoring_predictions_threshold)\n f1 = metrics.f1_score(y_true=scoring_labels, y_pred=scoring_predictions_threshold, average='binary',\n pos_label=1)\n return roc_auc, bacc, f1, scoring_loss", "def evaluate(model, val_dataloader):\n # Put the model into the evaluation mode. The dropout layers are disabled during\n # the test time.\n model.eval()\n\n # Tracking variables\n val_accuracy = []\n val_loss = []\n\n # For each batch in our validation set...\n for batch in val_dataloader:\n # Load batch to GPU\n b_input_ids, b_attn_mask, b_labels = tuple(t.to(device) for t in batch)\n\n # Compute logits\n with torch.no_grad():\n logits = model(b_input_ids, b_attn_mask)\n\n # Compute loss\n loss = loss_fn(logits, b_labels.long())\n val_loss.append(loss.item())\n\n # Get the predictions\n preds = torch.argmax(logits, dim=1).flatten()\n\n # Calculate the accuracy rate\n accuracy = (preds == b_labels).cpu().numpy().mean() * 100\n val_accuracy.append(accuracy)\n\n # Compute the average accuracy and loss over the validation set.\n val_loss = np.mean(val_loss)\n val_accuracy = np.mean(val_accuracy)\n\n return val_loss, val_accuracy", "def evaluate_model(X_train, X_test, y_train, y_test, batch_size, nb_epoch):\n model = Sequential()\n model.add(Dense(512, input_shape=(784,)))\n model.add(Activation(\"relu\"))\n model.add(Dropout(0.2))\n model.add(Dense(512))\n model.add(Activation(\"relu\"))\n model.add(Dropout(0.2))\n model.add(Dense(10))\n model.add(Activation(\"softmax\"))\n model.compile(loss=\"categorical_crossentropy\",\n optimizer=RMSprop(),\n metrics=[\"accuracy\"])\n model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,\n verbose=1, validation_data=(X_test, y_test))\n results = model.evaluate(X_test, y_test, verbose=0)\n return results, model", "def evaluate_model(model, ds_valid):\n print(\"-- Evaluate Model:\")\n for features, labels in ds_valid:\n valid_step(model, features, labels)\n logs = \"\\nValid Loss: {}, Valid Accuracy: {}\"\n tf.print(tf.strings.format(logs, (valid_loss.result(), valid_metric.result())))\n valid_loss.reset_states()\n train_metric.reset_states()\n valid_metric.reset_states()", "def evaluate(args, model, data_iterator, params, mark='Val', verbose=True):\n # set model to evaluation mode\n model.eval()\n\n # id2tag dict\n idx2tag = {idx: tag for idx, tag in enumerate(params.tags)}\n\n true_tags = []\n pred_tags = []\n\n # a running average object for loss\n loss_avg = utils.RunningAverage()\n for batch in tqdm(data_iterator, unit='Batch', ascii=True):\n # to device\n batch = tuple(t.to(params.device) for t in batch)\n input_ids, input_mask, labels, _, _, word_ids, word_positions = batch\n\n batch_size, max_len = labels.size()\n\n # inference\n with torch.no_grad():\n # get loss\n loss = model(input_ids, attention_mask=input_mask.bool(), labels=labels, input_word_ids=word_ids,\n word_position_matrix=word_positions)\n if params.n_gpu > 1 and args.multi_gpu:\n loss = loss.mean() # mean() to average on multi-gpu.\n # update the average loss\n loss_avg.update(loss.item())\n\n # inference\n batch_output = model(input_ids, attention_mask=input_mask.bool(), 
input_word_ids=word_ids,\n word_position_matrix=word_positions)\n\n # 恢复标签真实长度\n real_batch_tags = []\n for i in range(batch_size):\n real_len = int(input_mask[i].sum())\n real_batch_tags.append(labels[i][:real_len].to('cpu').numpy())\n\n # List[int]\n pred_tags.extend([idx2tag.get(idx) for indices in batch_output for idx in indices])\n true_tags.extend([idx2tag.get(idx) for indices in real_batch_tags for idx in indices])\n # sanity check\n assert len(pred_tags) == len(true_tags), 'len(pred_tags) is not equal to len(true_tags)!'\n\n # logging loss, f1 and report\n metrics = {}\n f1 = f1_score(true_tags, pred_tags)\n accuracy = accuracy_score(true_tags, pred_tags)\n metrics['loss'] = loss_avg()\n metrics['f1'] = f1\n metrics['accuracy'] = accuracy\n metrics_str = \"; \".join(\"{}: {:05.2f}\".format(k, v) for k, v in metrics.items())\n logging.info(\"- {} metrics: \".format(mark) + metrics_str)\n\n # f1 classification report\n if verbose:\n report = classification_report(true_tags, pred_tags)\n logging.info(report)\n return metrics", "def evaluate_model(model, X_test, y_test):\n # run prediction with test data\n y_pred = model.predict(X_test)\n\n # print precision, recall and f1-score\n i = 0\n for col in y_test:\n print('Evaluation for \"{}\": \\n {} \\n\\n'.format(col, classification_report(y_test[col], y_pred[:,i])))\n i += 1", "def evaluate(eval_ds, model, task):\n\n print('==========EVAL==========')\n # Testing contrastive accuracy\n if task['name'] == 'contrastive_accuracy':\n ds = eval_ds.map(data_utils.pretrain_preprocess)\n ds = ds.batch(128)\n test_contrast_acc = tf.keras.metrics.Accuracy(name='test_constrastive_accuracy')\n for x in ds:\n image = x['image']\n image = tf.transpose(image, [1, 0, 2, 3, 4])\n image = tf.reshape(\n image, \n (image.shape[0]*image.shape[1], image.shape[2], image.shape[3], image.shape[4])\n )\n out = model(image, mode='unsupervised', training=False)\n metrics.update_contrastive_accuracy2(test_contrast_acc, out, TEMP)\n print('test contrastive accuracy')\n print(test_contrast_acc.result())\n return \n\n # Testing classification accuracy \n ds = eval_ds.filter(lambda x: x['label'] != task['excluded_label'])\n ds = ds.map(data_utils.eval_preprocess)\n ds = ds.batch(FLAGS.eval_bs)\n test_class_acc = tf.keras.metrics.Accuracy(name='test_class_accuracy')\n for x in ds:\n image = x['image']\n labels = x[task['name']]\n if task['name'] == 'extr':\n out = model(image, mode='eval', sup_layers=2, training=False)\n else:\n out = model(image, mode='eval', sup_layers=1, training=False)\n metrics.update_supervised_accuracy(test_class_acc, labels, out)\n \n if FLAGS.debug:\n print(tf.math.argmax(out, axis=-1))\n print('test classification accuracy')\n print(test_class_acc.result())", "def main():\n data = load_data()\n analyze_features(data['full_features'])\n model = train(data)\n\n with open('model.pickle', 'wb') as f:\n pickle.dump(model, f)\n evaluate(model, data)", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n return classification_report(Y_test, y_pred, target_names = category_names)", "def evaluate(model, loss_func, dataloader, metrics):\r\n model.eval()\r\n summ = []\r\n device = utils.get_device()\r\n with torch.no_grad():\r\n for data in dataloader:\r\n sentences1, starts1, ends1, sentences2, starts2, ends2, inputY = data\r\n inputY = inputY.to(device)\r\n output_batch = model(sentences1, starts1, ends1, sentences2, starts2, ends2)\r\n loss = loss_func(output_batch, inputY)\r\n output_batch = 
output_batch.data.cpu().numpy()\r\n inputY = inputY.data.cpu().numpy()\r\n summary_batch = {metric: metrics[metric](\r\n output_batch, inputY) for metric in metrics}\r\n summary_batch['loss'] = loss.item()\r\n summ.append(summary_batch)\r\n # print(\"summ:{}\".format(summ))\r\n metrics_mean = {metric: np.mean([x[metric]\r\n for x in summ]) for metric in summ[0]}\r\n metrics_string = \" ; \".join(\"{}: {:05.3f}\".format(k, v)\r\n for k, v in metrics_mean.items())\r\n logging.info(\"- Eval metrics : \" + metrics_string)\r\n return metrics_mean", "def evaluate_model(model, X_test, Y_test, category_names):\n\n print(\"Testing Performance\")\n print(classification_report(Y_test, model.predict(X_test), target_names=category_names))\n\n #Todo cat names", "def evaluate(self):\n # Method variables definition\n X_train, X_test, y_train, y_test = dm.reshape_y_set_split_data(self.datasetManager)\n featureScaleDependentVariables = self.datasetManager.params.featureScaleDependentVariables\n\n # Feature Scaling\n X_scaler, X_train = dm.do_feature_scaling(X_train)\n if featureScaleDependentVariables:\n y_scaler, y_train = dm.do_feature_scaling(y_train)\n else:\n y_scaler = None\n y_train = self.datasetManager.y_train\n \n self.X_scaler = X_scaler\n self.y_scaler = y_scaler\n\n # Training the SVR model on the training set\n regressor = SVR(kernel = 'rbf')\n regressor.fit(X_train, y_train.ravel())\n self.regressor = regressor\n\n # Predicting the Test set results\n self.y_pred = y_scaler.inverse_transform(regressor.predict(X_scaler.transform(X_test))) if featureScaleDependentVariables else regressor.predict(X_test)\n \n # Returning the process result : the regression type and the predicted dependent variables set\n return [\"Support Vector Regression\", self.get_r2_score(y_test, self.y_pred)]", "def evaluate_model(model, X_test, Y_test, category_names): \n \n Y_pred = model.predict(X_test)\n print(classification_report(Y_test, Y_pred))\n display_results(Y_test, Y_pred)", "def eval_model(model, x_test, y_test, batch_size=None):\n if batch_size is None:\n batch_size = 128\n\n loss, acc = model.evaluate(x_test, y_test, batch_size=batch_size)\n confusion_matrix_model(model, y_test, x_test)\n return loss, acc", "def evaluate(model, tokenizer, dataset, lines, output_test_file, batch_size=32):\n sampler = SequentialSampler(dataset)\n dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=batch_size)\n\n print(\"*** Evaluating ***\")\n eval_loss = 0.0\n num_steps = 0\n preds = None\n out_label_ids = None\n for i, batch in enumerate(dataloader):\n if i % 200 == 199:\n print(\"=\", end=\"\")\n if i % 5000 == 4999:\n print(\"[Step \" + str(i+1) + \" / \" + str(len(dataloader)) + \"] \" )\n model.eval()\n batch = tuple(t.to(device) for t in batch)\n\n with torch.no_grad():\n labels = batch[3]\n outputs = model(input_ids=batch[0], attention_mask=batch[1], labels=labels)\n tmp_eval_loss, logits = outputs[:2]\n eval_loss += tmp_eval_loss.mean().item()\n \n num_steps += 1\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = labels.detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, labels.detach().cpu().numpy(), axis=0)\n \n eval_loss = eval_loss / num_steps\n \n preds_label = np.argmax(preds, axis=1)\n \n accuracy = (preds_label == out_label_ids).mean()\n output_dir = os.path.dirname(output_test_file)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n with open(output_test_file, \"w\") 
as writer:\n all_logits = preds.tolist()\n for i, logit in enumerate(all_logits):\n line = '<CODESPLIT>'.join(\n [item.encode('ascii', 'ignore').decode('ascii') for item in lines[i]])\n\n writer.write(line + '<CODESPLIT>' + '<CODESPLIT>'.join([str(l) for l in logit]) + '\\n')\n print(\"Accuracy =\", str(accuracy))\n\n return accuracy", "def evaluate_batch(self, batch: TorchData, model: nn.Module) -> Dict[str, Any]:\n pass", "def evaluate(model_object, X, y):\n \n # calcuated accuracy\n accuracy = model_object.score(X, y)\n \n return accuracy", "def evaluate(self, test_data, test_labels):\n raise NotImplementedError", "def evaluate(self, model, X_train, X_test, y_train, y_test):\n\n model.fit(X_train,y_train)\n y_pred = model.predict(X_test)\n R2 = r2_score(y_test, y_pred)\n MAE = round(mape(y_test, y_pred), 2)\n RMSE = round(rmse(y_test, y_pred), 2)\n\n res = {'Model': self.model, 'R2' : R2, 'MAPE': MAE, 'RMSE': RMSE}\n return res", "def evaluate_model(self, t, scaling_parameters, system_parameters):\n raise NotImplementedError", "def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)", "def eval_model(config, period, test_data):\n if config.network == 'MLPwithGAN':\n model = MLPwithGAN(config)\n elif config.network == 'MLP':\n model = MLP(config)\n elif config.network == 'LSTM':\n model = VanillaLSTM(config)\n elif config.network == 'CNN':\n model = CNNfeature(config)\n else:\n raise Exception('Unknown model type:{}'.format(config.network))\n\n if config.ensemble:\n m = model\n model = []\n\n for i in glob(gen_path(config.path, str(period)) + '/m*'):\n m.load_state_dict(\n torch.load(gen_path(i, filename=config.network + '.pkl')))\n m.to(config.device)\n m.eval()\n model.append(m)\n else:\n model.load_state_dict(\n torch.load(gen_path(config.path, str(period), 'model', filename=config.network + '.pkl')))\n model.to(config.device)\n model.eval()\n dataloader_test = test_data[0]\n test_date = test_data[1]\n test_symbol = test_data[2]\n sc_y = joblib.load(gen_path(config.path, str(period), 'scaler', filename='training_sc_y.pkl'))\n predict_y_test, real_y_test, valid_index_test = make_prediction(dataloader_test, sc_y, model, config)\n\n stock_score = pd.DataFrame()\n stock_score[\"symbol\"] = test_symbol[valid_index_test]\n stock_score[\"score\"] = predict_y_test\n stock_score['truth'] = real_y_test\n stock_score[\"date\"] = test_date[valid_index_test]\n stock_score = stock_score.sort_values(by=[\"date\"])\n stock_score.to_csv(gen_path(config.path, 'stock_score', filename=str(period) + '.csv'), index=False)", "def evaluate(self, dataloader):\n self.model.eval()\n losses = []\n total, correct = 0., 0.\n with torch.no_grad():\n for (inputs, targets) in dataloader:\n inputs = inputs.to(self.device)\n targets = targets.to(self.device)\n\n outputs = self.model(inputs)\n loss = self.criterion(outputs, targets)\n losses.append(loss.item())\n\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum().item()\n\n accuracy = 100. 
* correct / total\n return round(accuracy, 4), np.mean(losses)", "def evaluate_model(model, instances, labels):\n prediction = model.predict(np.array(instances))\n\n prediction_inv = inverse_normalization(prediction)\n labels_inv = inverse_normalization(labels)\n mse = mean_squared_error(labels_inv, prediction_inv)\n\n return mse", "def evaluate_model(model, X_test, Y_test, category_names):\n \n Y_pred = model.predict(X_test)\n \n print(classification_report(Y_test.values, Y_pred, target_names=category_names))", "def evaluate(self, x_test, y_test, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n\n test_results = self.model.evaluate(x_test,\n y_test,\n batch_size=self.batch_size,\n verbose=verbose)\n self.val_history = test_results\n return test_results", "def evaluate(self, x_test, y_test, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n\n test_results = self.model.evaluate(x_test,\n y_test,\n batch_size=self.batch_size,\n verbose=verbose)\n self.val_history = test_results\n return test_results", "def evaluate(self, x_test, y_test, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n\n test_results = self.model.evaluate(x_test,\n y_test,\n batch_size=self.batch_size,\n verbose=verbose)\n self.val_history = test_results\n return test_results", "def evaluate(args):\n dataset_param_filepath = os.path.join(args.model, 'dataset.params')\n dataset_params = putils.load_params(dataset_param_filepath)\n source_vocab_filepath = os.path.join(args.model, 'source.vocab')\n source_vocab = Vocab(vocab_filepath=source_vocab_filepath)\n target_vocab_filepath = os.path.join(args.model, 'target.vocab')\n target_vocab = Vocab(vocab_filepath=target_vocab_filepath)\n model_params_filepath = os.path.join(args.model, 'model.params')\n model_params = putils.load_params(model_params_filepath)\n checkpoint_filepath = os.path.join(args.model, 'checkpoint.tar')\n if not torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on CPU')\n checkpoint = torch.load(checkpoint_filepath,\n map_location=const.DEVICE)\n elif torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath)\n elif torch.cuda.is_available() and not model_params['cuda']:\n logger.info('Loading a CPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath,\n map_location='cuda:0')\n else:\n logger.info('Loading a CPU-trained model on CPU')\n checkpoint = torch.load(checkpoint_filepath)\n encoder = Encoder(model_type=checkpoint['encoder']['model_type'],\n input_size=checkpoint['encoder']['input_size'],\n hidden_size=checkpoint['encoder']['hidden_size'],\n num_layers=checkpoint['encoder']['num_layers'],\n nonlinearity=checkpoint['encoder']['nonlinearity'],\n bias=checkpoint['encoder']['bias'],\n dropout=checkpoint['encoder']['dropout'],\n bidirectional=checkpoint['encoder']['bidirectional'])\n if checkpoint['with_attention']:\n decoder = Attention(model_type=checkpoint['decoder']['model_type'],\n hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n max_seq_len=dataset_params['max_seq_len'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n 
bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'],\n bidirectional=checkpoint['decoder']['bidirectional'])\n else:\n decoder = Decoder(model_type=checkpoint['decoder']['model_type'],\n hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'],\n bidirectional=checkpoint['decoder']['bidirectional'])\n encoder.load_state_dict(checkpoint['encoder_state_dict'])\n decoder.load_state_dict(checkpoint['decoder_state_dict'])\n if torch.cuda.is_available():\n encoder.to(const.DEVICE)\n decoder.to(const.DEVICE)\n encoder.eval()\n decoder.eval()\n indexes = putils.index_dataset(\n args.data, source_vocab.item2idx, target_vocab.item2idx,\n dataset_params['is_character_based'], dataset_params['max_seq_len'],\n dataset_params['is_reversed'])\n if args.random > 0:\n random.shuffle(indexes)\n for seq_num in range(args.random):\n seq = indexes[seq_num]\n print('-'*80)\n print('>', ' '.join([source_vocab.idx2item[idx]\n for idx in seq[0]]))\n print('=', ' '.join([target_vocab.idx2item[idx]\n for idx in seq[1]]))\n # TODO: add support for OOV\n predicted_idx, _ = _decode(seq[0], encoder, decoder,\n checkpoint['with_attention'],\n dataset_params['max_seq_len'])\n print('<', ' '.join([target_vocab.idx2item[idx]\n for idx in predicted_idx]))\n else:\n _evaluate(indexes, encoder, decoder, target_vocab, checkpoint,\n dataset_params)", "def __call__(self, predictor_model) -> None:\n self.save_result(self.evaluate(predictor_model))", "def evaluate(self, x, y, batch_size=None, **kwargs):\n if not batch_size:\n batch_size = self.batch_size\n return self.model.evaluate(x, y, batch_size, **kwargs)", "def evaluate(self, dataset, metric='auto', verbose=True, batch_size=64):\n if(batch_size < 1):\n raise ValueError(\"'batch_size' must be greater than or equal to 1\")\n\n extracted_features = self._extract_features(dataset, verbose=verbose, batch_size=batch_size)\n extracted_features[self.target] = dataset[self.target]\n return self.classifier.evaluate(extracted_features, metric = metric)", "def evaluate(self, batch_x, batch_y):\n raise NotImplementedError()", "def evaluate(model,loss_fn, val_dataloader):\r\n # Put the model into the evaluation mode. 
The dropout layers are disabled during\r\n # the test time.\r\n model.eval()\r\n\r\n # Tracking variables\r\n val_accuracy = []\r\n val_loss = []\r\n\r\n # For each batch in our validation set...\r\n for batch in val_dataloader:\r\n # Load batch to GPU\r\n b_input_ids, b_attn_mask, b_labels = tuple(t.to(device) for t in batch)\r\n\r\n # Compute logits\r\n with torch.no_grad():\r\n logits = model(b_input_ids, b_attn_mask)\r\n\r\n # Compute loss\r\n loss = loss_fn(logits, b_labels)\r\n val_loss.append(loss.item())\r\n\r\n # Get the predictions\r\n preds = torch.argmax(logits, dim=1).flatten()\r\n\r\n # Calculate the accuracy rate\r\n accuracy = (preds == b_labels).cpu().numpy().mean() * 100\r\n val_accuracy.append(accuracy)\r\n\r\n # Compute the average accuracy and loss over the validation set.\r\n val_loss = np.mean(val_loss)\r\n val_accuracy = np.mean(val_accuracy)\r\n\r\n return val_loss, val_accuracy", "def evaluate(self, ts_loader=None):\n # start evaluation of the model\n self.tr_model.eval()\n samples, correct = 0, 0\n \n # check if a dataloader was provided for evaluation\n loader = self.ts_loader if not ts_loader else ts_loader\n \n with torch.no_grad():\n for x, y in loader:\n \n x, y = x.to(device), y.to(device)\n \n y_ = self.tr_model(x)\n _, predicted = torch.max(y_.detach(), 1)\n \n samples += y.shape[0]\n correct += (predicted == y).sum().item()\n \n # return evaluation statistics\n return {\"accuracy\" : correct/samples}", "def evaluate_model(model, X_test, Y_test, category_names):\n \n y_preds = model.predict(X_test)\n predictions = pd.DataFrame(data=y_preds, columns=Y_test.columns, index=Y_test.index)\n for col in Y_test.columns:\n print(classification_report(predictions[col],Y_test[col]))", "def evaluate_module(\n model: nn.Module,\n dataset: Union[NumpyArrayTuple, torch.utils.data.Dataset],\n loss_fn,\n device: torch.device,\n metrics_map: MetricsMapType = None,\n batch_size: int = 64,\n verbose: bool = True,\n) -> MetricsValType:\n try:\n model = model.to(device)\n\n # if dataset is a tuple of np.ndarrays, convert to torch Dataset\n if isinstance(dataset, tuple):\n X = torch.from_numpy(dataset[0]).type(torch.FloatTensor)\n y = torch.from_numpy(dataset[1]).type(\n torch.LongTensor if dataset[1].dtype in [np.int, np.long] else torch.FloatTensor\n )\n dataset = torch.utils.data.TensorDataset(X, y)\n\n loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False)\n\n tot_samples, samples, num_batches = len(dataset), 0, 0\n len_tot_samples = len(str(tot_samples))\n\n # create metrics history\n history = MetricsHistory(metrics_map)\n\n with torch.no_grad():\n model.eval()\n for X, y in loader:\n X = X.to(device)\n y = y.to(device)\n\n # forward pass\n preds = model(X)\n # compute batch loss\n batch_loss = loss_fn(preds, y).item()\n history.calculate_batch_metrics(preds.to(\"cpu\"), y.to(\"cpu\"), batch_loss, val_metrics=False)\n samples += len(y)\n num_batches += 1\n if verbose:\n metricsStr = history.get_metrics_str(batch_metrics=True, include_val_metrics=False)\n print(\n \"\\rEvaluating (%*d/%*d) -> %s\"\n % (len_tot_samples, samples, len_tot_samples, tot_samples, metricsStr),\n end=\"\",\n flush=True,\n )\n else:\n # iteration over batch completed\n # calculate average metrics across all batches\n history.calculate_epoch_metrics(val_metrics=False)\n metricsStr = history.get_metrics_str(batch_metrics=False, include_val_metrics=False)\n print(\n \"\\rEvaluating (%*d/%*d) -> %s\"\n % (len_tot_samples, samples, len_tot_samples, tot_samples, 
metricsStr),\n flush=True,\n )\n return history.get_metric_vals(history.tracked_metrics())\n finally:\n model = model.to(\"cpu\")", "def evaluate_model(model, train_input, train_target, test_input, test_target, loss, save_plot, mname=None):\n # Evalute Model in train set\n epochs_number = len(loss)\n output = model.forward(train_input)\n train_loss = model.compute_loss(output, train_target).item()\n train_error = compute_number_error(output, train_target).item()\n\n print(\"\\nTraining Loss: \", train_loss)\n print(\"Training Number of errors: \", train_error)\n\n id_class_train = output.argmax(dim=1)\n if save_plot:\n plot_result(train_input, train_target, id_class_train, fname=mname)\n plot_loss(range(0, epochs_number), loss, fname=mname)\n\n # Deactivate dropout to test models\n model.enable_dropout(False)\n \n # Evaluate Model in test set\n output = model.forward(test_input)\n test_loss = model.compute_loss(output, test_target).item()\n test_error = compute_number_error(output, test_target).item()\n\n print(\"\\nTest Loss: \", test_loss)\n print(\"Test Number of errors: \", test_error)\n\n\n id_class_test = output.argmax(dim=1)\n if save_plot:\n plot_result(test_input, test_target, id_class_test, train=False, fname=mname)\n \n return [train_loss, train_error, test_loss, test_error]", "def evaluate(model, data_pars=None, compute_pars=None, out_pars=None, **kw):\n ddict = {}\n \n return ddict", "def evaluate_model(args,model,data_loader):\n model.eval()\n with torch.no_grad():\n for data,_ in data_loader:\n\n data,targets,angles = rotate_tensor(\n data.numpy(),\n args.init_rot_range,\n -args.eval_rotation_range,\n +args.eval_rotation_range)\n\n\n data = torch.from_numpy(data)\n targets = torch.from_numpy(targets)\n angles = torch.from_numpy(angles)\n angles = angles.view(angles.size(0), 1)\n\n # Forward pass\n output, identity_vectors, eucleidian_vectors= model(data, targets,angles*np.pi/180) \n # Get triplet loss\n losses=triple_loss(args,targets,output, identity_vectors, eucleidian_vectors)\n break\n\n return losses[0].item()", "def evaluate(self, X_test, y_test):\n self.run(self)\n self.y_pred = self.pipeline.predict(X_test)\n self.rmse = compute_rmse(self.y_pred, y_test)", "def evaluate_model(valp):\n\n a = valp.predict(data_inputs[\"Test\"], [], new=True)[0]\n\n m2e = np.mean(mse(a[\"o0\"], data_outputs[\"Test\"][\"o0\"]))\n acc = 1 - acc_err(a[\"o1\"][:, 0], np.argmax(data_outputs[\"Test\"][\"o1\"], axis=1))\n i_d = 50-np.mean(inception_score(a[\"o2\"][:100]))\n\n return np.array([m2e, acc, i_d])", "def eval_model(device, class_mapping, model, specs, data_transform):\n\n fake_class = class_mapping[\"fake\"]\n specs = [data_transform(f) for f in specs if f is not None]\n if not specs:\n return None\n specs = torch.stack(specs)\n d = specs.to(device)\n outputs = model(d)\n\n outputs = softmax(outputs, dim=1)\n probs = outputs[:, fake_class].cpu().detach().numpy()\n return gmean(probs)", "def evaluate(embed_model, model, pt, dataset, batch_size):\n\n embed_model.eval()\n model.eval()\n lossf = nn.NLLLoss(size_average=False)\n\n correct = 0\n total = 0\n total_loss = 0\n print('Start Evaluating!')\n print('Validation Size: {}'.format(len(dataset)))\n\n threshold = 0.3\n\n data_iter = iter(pt.batch_iter(dataset, batch_size))\n\n for i in range(len(dataset) // batch_size):\n\n # catch the data\n p1_idx, p2_idx, _, _, label = next(data_iter)\n\n p1_idx = Variable(p1_idx)\n p2_idx = Variable(p2_idx)\n label = Variable(label)\n\n if use_cuda:\n p1_idx = p1_idx.cuda()\n p2_idx = 
p2_idx.cuda()\n label = label.cuda()\n\n # Feed to the network\n p1_emb, p2_emb = embed_model(p1_idx, p2_idx)\n out = model(p1_emb, p2_emb)\n\n # print(label)\n # print(out)\n\n loss = lossf(out, label)\n total_loss += loss.data[0]\n\n prob = torch.exp(out)\n predicted = Variable(torch.LongTensor([1 if l[1].data[0] >= threshold else 0 for l in prob]))\n\n # _, predicted = torch.max(out, dim=1)\n total += p1_idx.size()[0]\n\n # print(predicted)\n\n correct += torch.sum((label == predicted), dim=0).data[0]\n\n print('Correct Labels: {}/{}'.format(correct, (i + 1) * batch_size))\n\n print('Valid Loss: {}, Acc: {}'.format(total_loss / float(total),\n correct / float(total)))", "def evaluate_model(model,test_inputs,test_labels,model_mode):\n\n if model_mode == \"classification\":\n y_pred = model.predict(test_inputs)\n print(\"Accuracy score: \", accuracy_score(test_labels, y_pred))\n #print(\"F1 score: \", f1_score(test_labels,y_pred, average='weighted'))\n\n conf_mx = confusion_matrix(test_labels, y_pred)\n #print(conf_mx)\n plt.matshow(conf_mx, cmap = plt.cm.jet)\n plt.show()\n\n if model_mode == \"regression\":\n y_pred = model.predict(test_inputs)\n print(\"Mean absolute error: \", mean_absolute_error(test_labels, y_pred))", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n print(classification_report(Y_test, y_pred, target_names=Y_test.keys()))", "def evaluate(model, subset, batch_size, show_first_batch=False):\n # Get dataset\n ds, ds_size = data.load_ds(subset, label='class'), data.size(subset, 'class')\n\n # Prepare dataset for input to model\n ds = ds.batch(batch_size, drop_remainder=True).prefetch(buffer_size=AUTOTUNE)\n steps = int(math.floor(ds_size / batch_size))\n\n # Evaluate model on dataset and print results\n results = model.evaluate(ds, steps=steps)\n\n # Split dataset into inputs and labels\n inputs_ds = ds.unbatch().map(lambda x, _: x).batch(batch_size)\n labels_ds = ds.unbatch().map(lambda _, y: y)\n\n # Make predictions on input\n preds = model.predict(inputs_ds, steps=steps)\n # Turn labels into list (same size as preds)\n labels = list(labels_ds.as_numpy_iterator())[:len(preds)]\n return labels, preds, results", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n print(classification_report(Y_test, y_pred, target_names=category_names))\n pass" ]
[ "0.7838396", "0.75702345", "0.75052845", "0.7403759", "0.7293109", "0.71893185", "0.7124573", "0.71120846", "0.7107418", "0.70945823", "0.70358276", "0.7014347", "0.7008108", "0.6981123", "0.69724715", "0.6952163", "0.69312906", "0.69263285", "0.68702364", "0.68674177", "0.6859607", "0.68544716", "0.68421", "0.68136024", "0.6794461", "0.6793144", "0.6785928", "0.67836154", "0.6753818", "0.674202", "0.67419934", "0.6740016", "0.6733058", "0.67297703", "0.67242295", "0.6718042", "0.6715424", "0.6708011", "0.6705461", "0.66951025", "0.66873276", "0.66775405", "0.6669791", "0.66498786", "0.66327465", "0.6624036", "0.6619358", "0.6608294", "0.66050684", "0.65957785", "0.6586877", "0.65843797", "0.65664303", "0.6564173", "0.6556183", "0.6555728", "0.6554877", "0.65348524", "0.6534566", "0.6523174", "0.6508012", "0.65078425", "0.6504656", "0.6499709", "0.64991796", "0.64966226", "0.6493185", "0.6491822", "0.64850265", "0.64843684", "0.6483478", "0.64819676", "0.64727134", "0.64633375", "0.646057", "0.6458942", "0.64394087", "0.6437908", "0.6437908", "0.6437908", "0.6436003", "0.6422798", "0.64198047", "0.64172167", "0.6414519", "0.64124566", "0.6408136", "0.64052224", "0.6404695", "0.64021593", "0.6397341", "0.6394701", "0.639279", "0.63863117", "0.6378831", "0.63761824", "0.6370643", "0.63668954", "0.6365173", "0.6361309" ]
0.6675748
42
Make predictions on a dataset
def predict_dataset( self, model: nn.Module, batch_size: int = 32, data: Union[str, th.utils.data.Dataset] = "test", collate_fn: Optional[Callable] = None, ): # Set model to test mode mode = model.training model.train(mode=False) # Select dataset for evaluation dataset = data if isinstance(data, str): dataset = self.get_split(data) elif not isinstance(dataset, th.utils.data.Dataset): raise ValueError( "`data` must be a pytorch dataset or one of 'dev'/'valid'" f"/'test/'train', got {dataset.__class__.__name__} instead" ) # Dataloader data_loader = DataLoader( dataset, batch_size=batch_size, collate_fn=self.collate_fn if collate_fn is None else collate_fn, ) log_ps, y_hats = [], [] for batch in data_loader: # Get model predictions with th.no_grad(): log_p, y_hat = self.predict(model, batch) # Track predictions and log probabilities log_ps.append(log_p) y_hats.append(y_hat) # Concatenate log_ps = th.cat(log_ps, dim=0).cpu() y_hats = th.cat(y_hats, dim=0).cpu() # Reset model to the original mode model.train(mode=mode) return log_ps, y_hats
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(self, data):\n\t\traise NotImplementedError", "def predict(self, data: List):", "def predict(self, X):", "def predict(self, X):", "def make_predictions(df):\n t_labels = get_labels(\"labels_pca\")\n # clean data\n df = clean_data(df)\n # engineer data\n df = engineer_features(df)\n # predict\n with open(\"model.pkl\",\"r\") as mdl:\n model = pickle.load(mdl)\n mdl.close()\n predictions = model.predict(df[t_labels])\n return predictions", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(self, instances):\r\n raise NotImplementedError", "def predict(self, predPoints=None):", "def predict(self, data):\n return self.result.predict(data)", "def fit_predict(self):\n self.classifier = self.model\n self.classifier.fit(self.X_sample, self.y_sample)\n self.y_pred = self.classifier.predict(self.X_test)", "def predict(self, data_in):\n pass", "def predict(self, datafile):", "def predict(self, xs, **kwargs):", "def predict(self, x):\n \n\n return predictions", "def predict(self):\n for track in self.tracks:\n track.predict(self.kf)\n #track.du_doan(self.kf_test)", "def predict(self, **kwargs):\n raise NotImplementedError", "def predict(self, test_data):\r\n return self.gs.predict(test_data)", "def predict_only(self):", "def predict(self, X):\n pass", "def predict(self, X):\n pass", "def predict(self, X):\n pass", "def predict(self):\n raise NotImplementedError", "def predict():\n\n\n json_payload = request.json\n #LOG.info(f\"JSON payload: %s\" %json_payload)\n inference_payload = pd.DataFrame(json_payload)\n #LOG.info(\"inference payload DataFrame: %s\" %inference_payload)\n scaled_payload = scale(inference_payload)\n prediction = list(clf.predict(scaled_payload))\n return jsonify({'prediction': prediction})", "def _predict(self, testX):\n pass", "def predict(self, images, batch_size):\n pass", "def predict(self, model, x_test):\n pass", "def test(self, dataset): \n predictions = np.zeros(len(dataset), int)\n \n accuracy = self.random_forest.score(dataset[:,:-1], dataset[:,-1]) # Predict and compute accuracy.\n predictions = self.predict(dataset[:,:-1]) # Predict and return list of predictions.\n \n return predictions, accuracy", "def predict(self, X, pred_batch_size=None):", "def predict(self, data):\n\t\tres = self.neuralNetworks.inference(self.dataCenter.process_inference_data(data))\n\t\tprint(res)", "def predict():\n import trace\n trace.predict()", "def predict(self, review):\n raise NotImplementedError", "def predict(test_dataset,test_tX,weights):\n for idx, dataset in enumerate(test_tX):\n test_dataset[idx]['Prediction'] = predict_labels(weights[idx],dataset)\n return test_dataset", "def predict(data: pd.DataFrame) -> pd.DataFrame:\n return pd.DataFrame(data={\"prediction\": trained_model.predict(data)})", "def predict(model, X_testing):\n predictions = model.predict(X_testing)\n\n return predictions", "def test():\n # load dataset and model\n X, observed_y = load_data('../data/dev.txt')\n\n model = pickle.load(open('test.model', 'rb'))\n model.traverse()\n\n # predict labels for dataset\n preds = model.predict(X)\n\n # print(preds)\n # output model predictions\n np.savetxt('test.predictions', preds, fmt='%s')", "def predict_data(img): \n return gennet.predict_data(img, 'Resnet50')", "def classify(self, data):\n \"*** YOUR CODE HERE ***\"\n return self.sklearn_classifier.predict(data)", "def predict(self, data):\r\n return self.sess.run([self.predict_op, self.Mu], feed_dict={self.X: data})", "def predict(self, model, 
context, data):\n pass", "def predict(self, data):\r\n x = []\r\n x.append(data.sampled_matrix())\r\n x = np.array(x)\r\n x = np.reshape(x, (x.shape[0], x.shape[1], 1))\r\n\r\n result = self.model.predict(x)\r\n return result", "def make_prediction(x_train, y_train, x_test, model):\n model.fit(x_train, y_train)\n y_predict = model.predict(x_test)\n return y_predict", "def _fit_predict(X_train, y_train, X_test):\n raise NotImplementedError()", "def predict_evidences(self, X):", "def predict(classifier, data):\n print(\"Beggining to classify data\")\n results = classifier.predict(data)\n results = pd.DataFrame(results)\n results.index += 1\n results.to_csv(\"out/results.csv\", header=[\"Label\"], index=True, index_label=[\"ImageId\"])\n print(\"Finished classifying data\")", "def predict(self, df, target):\n self.logger.info(\"Beginning MatPipe prediction using fitted pipeline.\")\n df = self.autofeaturizer.transform(df, target)\n df = self.cleaner.transform(df, target)\n df = self.reducer.transform(df, target)\n predictions = self.learner.predict(df, target)\n self.logger.info(\"MatPipe prediction completed.\")\n return predictions", "def predictions(logits):\n # TODO implement predictions\n return logits", "def predict(self, test_data):\n return self.leader.predict(test_data)", "def predict():\n\n if request.is_json:\n req = request.get_json(force=True)\n df = pd.read_json(req, orient='records')\n return pd.DataFrame(clf_loaded.predict(df).round()).to_json(orient='records')", "def predict(self, Xtt):\n # predict outputs for test dataset\n self.logger.info(\n self.__name__ + ' predicts on {:d} samples.'.format(Xtt.shape[0]))\n pass", "def predict(data, model_predict):\n # Execute any steps you need to do before scoring\n\n # This method makes predictions against the raw, deserialized model\n #predictions = model_predict(data)\n\n data.to_csv(\"/opt/code/chemprop_folder/for_scoring.csv\", index=False)\n\n args = PredictArgs().parse_args([\n '--test_path', '/opt/chemprop_folder/for_scoring.csv',\n '--checkpoint_path', '/opt/code/model.pth',\n '--preds_path', '/opt/chemprop_folder/preds.csv'\n ])\n\n make_predictions(args)\n\n preds_df = pds.read_csv(\"/opt/chemprop_folder/preds.csv\")\n sh = str(preds_df.shape)\n print(sh)\n\n preds_df = preds_df.rename(columns = {\"p_np\": \"positive_class_label\"})\n preds_df = preds_df.drop(columns=['smiles'])\n preds_df[\"negative_class_label\"] = 1 - preds_df[\"positive_class_label\"]\n\n print(preds_df.head())\n\n # Execute any steps you need to do after scoring\n # Note: To properly send predictions back to DataRobot, the returned DataFrame should contain a\n # column for each output label for classification or a single value column for regression\n return preds_df", "def predict(self, X):\n raise NotImplementedError", "def test_fit_predict() -> None:\n mapie = MapieClassifier()\n mapie.fit(X_toy, y_toy)\n mapie.predict(X_toy)", "def predict(self, data: np.array) -> np.array:\n raise NotImplementedError", "def predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. 
Run build method or load model before fitting')\n # print len(data[0])\n # print type(data[0])\n # print data.shape\n return self.model.predict(data, 1, verbose) # ,steps)", "def _predict(self, dataset):\n binary_predictions = ProxyClassifier._predict(self, dataset)\n self.ca.estimates = binary_predictions\n predictions = [ {-1: self.__predictneg,\n +1: self.__predictpos}[x] for x in binary_predictions]\n self.ca.predictions = predictions\n return predictions", "def predict(x_tst, model):\n\n predictions = model.predict(x_tst)\n return predictions", "def predict(model, images):\n return model.predict_classes(images)", "def predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n return self.model.predict(data, 1, verbose) # ,steps)", "def predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n return self.model.predict(data, 1, verbose) # ,steps)", "def _predict_all(self, data):\n preds = np.zeros(len(data))\n for row in data.itertuples():\n index, item, _, user = row\n preds[index] = self.predict(user, item)\n return preds", "def predict(self, data):\n data['predicted'] = self.sentiment_classifier.predict_estimator(data)\n return data", "def make_predictions(self):\n if is_classification(self.model):\n if self.ct == None:\n prediction = self.model.predict(self.input_data.to_numpy())\n probabilities = self.model.predict_proba(self.input_data.to_numpy())\n return prediction, probabilities\n elif self.ct != None: \n prediction = self.model.predict(self.data_into_model())\n probabilities = self.model.predict_proba(self.data_into_model())\n return prediction, probabilities\n else:\n raise Exception((\"{} not supported. Please create an issue on Github\").format(self.model))\n \n else:\n if self.ct == None:\n prediction = self.model.predict(self.input_data)\n return prediction\n elif self.ct != None: \n prediction = self.model.predict(self.data_into_model())\n return prediction\n else:\n raise Exception((\"{} not supported. 
Please create an issue on Github\").format(self.self.model))", "def predict_data(data: pd.DataFrame, model: list):\n prediction = []\n for i, row in data.iterrows():\n prediction.append(predict_dataset(row, model))\n return prediction", "def predict(self):\n for track in self.tracks:\n track.predict(self.kf)", "def predict(self):\n for track in self.tracks:\n track.predict(self.kf)", "def predict(self):\n for track in self.tracks:\n track.predict(self.kf)", "def test_test_model(self):\n\n dataset = ClassificationTestDataset()\n model = ClassificationTestModel(dataset)\n preds = list(model.predict(dataset.examples))\n self.assertEqual(np.argmax(preds[0]['preds']), 2)\n self.assertEqual(np.argmax(preds[1]['preds']), 1)\n self.assertEqual(np.argmax(preds[2]['preds']), 4)\n self.assertEqual(np.argmax(preds[3]['preds']), 3)", "def predict(self): \n return self.model.predict(self.test_x)", "def predict(self, samples): \n return self.random_forest.predict(samples)", "def test_predict(self):\n self.regression_single.predict(self.X_test)\n self.assertTrue(len(self.regression_single.y_pred))\n self.regression_boston.predict(self.boston_x_test)\n self.assertTrue(len(self.regression_boston.y_pred))", "def predict(self, src): # real signature unknown; restored from __doc__\n pass", "def predict(model):\n # load test dataset\n test = dict(json.load(open('util_files/test.json')))\n ids = test['ids']\n data = test['data']\n\n df = pd.read_csv('data/document_departments.csv')\n labels = dict(df.values.tolist())\n\n id2cls = dict(json.load(open('util_files/id2cls.json')))\n \n ytrue = []\n ypredicted = []\n \n for i in range(len(data)):\n \n prediction = np.argmax(model.predict_on_batch(np.expand_dims(data[i], axis=0)))\n \n ypredicted.append(id2cls[str(prediction)])\n \n cls = labels[int(ids[i])]\n ytrue.append(cls)\n \n print \"classification report\"\n print classification_report(y_true=ytrue,\n y_pred=ypredicted)\n \n print \"*********************\"\n print \"Accuracy on test set\"\n print accuracy_score(y_true=ytrue,\n y_pred=ypredicted)\n print \"*********************\"", "def predict(self, obs):\n pass", "def predict(self, dataset):\n # TODO: self.model(training=False)\n # logging.info('Predicting')\n # if self.verbosity > 1:\n # print('Predicting')\n dataset = rdata.data2dataset(dataset) # Convert to dataset\n assert dataset.get_dim_input() == self.n_inputs, \\\n 'Number of covariates does not match the model %d -> %d' % (dataset.get_dim_input(), self.n_inputs)\n n_data = dataset.get_n_data()\n\n pred = self._predict(dataset=dataset) # Predict\n\n if self.isprobabilistic():\n assert pred[0].shape == (n_data, self.n_outputs)\n assert pred[1].shape == (n_data, self.n_outputs)\n else:\n assert pred.shape == (n_data, self.n_outputs)\n return pred", "def predict(dataset):\n import capsnet\n\n # Load (standardized) input data and associated file names\n test_x, _, names = _load_data(dataset)\n\n # Predict class probabilities for each model (epoch)\n at_preds, sed_preds = [], []\n\n for epoch in _determine_epochs(cfg.prediction_epochs):\n model = _load_model(epoch)\n at_pred, sed_pred = utils.timeit(\n lambda: capsnet.gccaps_predict(test_x, model),\n '[Epoch %d] Predicted class probabilities' % epoch)\n\n at_preds.append(at_pred)\n sed_preds.append(sed_pred)\n\n # Average predictions to give an overall output\n total_at_pred = np.mean(at_preds, axis=0)\n total_sed_pred = np.mean(sed_preds, axis=0)\n\n # Ensure output directory exists and set file path format\n 
os.makedirs(os.path.dirname(cfg.predictions_path), exist_ok=True)\n predictions_path = cfg.predictions_path.format('%s', dataset.name)\n\n # Save free parameters to disk\n utils.log_parameters({'prediction_epochs': cfg.prediction_epochs},\n os.path.join(os.path.dirname(cfg.predictions_path),\n 'parameters.json'))\n\n # Write predictions to disk\n utils.write_predictions(names, total_at_pred, predictions_path % 'at')\n utils.write_predictions(names, total_sed_pred, predictions_path % 'sed')", "def process(self, data):\n return self.estimator.predict(data)", "def _predict(self, X):\n raise NotImplementedError", "def test(self) -> None:\n\n self._predictions = self._lr.predict(self._X_test)", "async def predict(iris: IrisPredictionInput) :\n return clf.predict(iris.data)", "def predict(self,Xpred, nsamples=2000, tune=100, progress=True, points2=[]):\n if self.type_y=='affine':\n return self.predict_affine(Xpred, nsamples, tune, progress, points2)\n elif self.type_y=='regression':\n return self.predict_regression(Xpred, nsamples, tune, progress, points2)\n elif self.type_y=='mixed':\n return self.predict_mixed(Xpred, nsamples, tune, progress, points2)", "def extract_predictions(dataset):\n return dataset.Prediction.apply(lambda x: -1 if x == 'b' else 1)", "def test_fit_predict() -> None:\n mapie = MapieRegressor()\n mapie.fit(X_toy, y_toy)\n mapie.predict(X_toy)", "def prediction():\r\n\r\n\r\n\tpredictVal = []\r\n\taccuracy = 0.0\r\n\r\n\t# Calculate accuracy for each class in testData\r\n\tfor item in testData:\r\n\t\tclass0Prediction = posProb / 100\r\n\t\tclass1Prediction = negProb / 100\r\n\t\t\r\n\t\t# Multiply the prior probablities for negative and positive reviews by their feature likelihoods \r\n\t\tfor word in item[2]:\r\n\t\t\tclass0Prediction *= class0Dict[word]\r\n\t\t\tclass1Prediction *= class1Dict[word]\r\n\r\n\t\t# Give every item in testData a predicted value\r\n\t\tif(class0Prediction > class1Prediction):\r\n\t\t\tpredictVal.append('0')\r\n\t\telse:\r\n\t\t\tpredictVal.append('1')\r\n\r\n\tfor i in range(len(testData)):\r\n\t\tif(testData[i][1] == predictVal[i]):\r\n\t\t\taccuracy += 1\r\n\r\n\t\t\t\r\n\taccuracy = 100 * (accuracy / len(testData))\r\n\treturn(predictVal, accuracy)", "def predict(self, data: List):\r\n return self._lda.predict(data)", "def predict(self, x):\n # *** START CODE HERE ***\n return self.clf.predict_classes(x.reshape(x.shape[0], 28, 28, 1))\n # *** END CODE HERE ***", "def predict(self, df: pd.DataFrame):\n x_pad, labels, tids = self.preprocess(df, train=False)\n x_split = split_inputs(x_pad, self.vocab_sizes)\n this_batch_size = x_pad.shape[0]\n noise = np.random.normal(0, 1, (this_batch_size, self.latent_dim))\n gen_inputs = x_split\n gen_inputs.insert(-1, noise)\n predictions = self.gen.predict(gen_inputs)\n predictions_concat = np.concatenate(predictions, axis=2)\n return self.postprocess(predictions_concat, labels, tids)", "def _generate_predictions(self, data):\n\n return np.zeros(data.shape[0])", "def predict_single_fold(self, model: TorchBasedLinearEstimator, dataset: TabularDataset) -> np.ndarray:\n pred = model.predict(dataset.data)\n\n return pred", "def predict(self, test_dataset: Dataset) -> PredictionOutput:\n test_dataloader = self.get_test_dataloader(test_dataset)\n return self._prediction_loop(test_dataloader, description=\"Prediction\")", "def _predict(self, data):\n # make sure we're talking about arrays\n data = N.asarray(data)\n\n # checks only in debug mode\n if __debug__:\n if not data.ndim == 2:\n raise ValueError, \"Data 
array must be two-dimensional.\"\n\n if not data.shape[1] == self.__data.nfeatures:\n raise ValueError, \"Length of data samples (features) does \" \\\n \"not match the classifier.\"\n\n # compute the distance matrix between training and test data with\n # distances stored row-wise, ie. distances between test sample [0]\n # and all training samples will end up in row 0\n dists = self.__dfx(self.__data.samples, data).T\n\n # determine the k nearest neighbors per test sample\n knns = dists.argsort(axis=1)[:, :self.__k]\n\n # predicted class labels will go here\n predicted = []\n\n if self.__voting == 'majority':\n vfx = self.getMajorityVote\n elif self.__voting == 'weighted':\n vfx = self.getWeightedVote\n else:\n raise ValueError, \"kNN told to perform unknown voting '%s'.\" \\\n % self.__voting\n\n # perform voting\n results = [vfx(knn) for knn in knns]\n\n # extract predictions\n predicted = [r[0] for r in results]\n\n # store the predictions in the state. Relies on State._setitem to do\n # nothing if the relevant state member is not enabled\n self.predictions = predicted\n self.values = [r[1] for r in results]\n\n return predicted", "def predict_from(self, inputs, to_layers):", "def predict_all(self, imgs):\n return self._predict(imgs)", "def do_predict(self):\n answer = []\n response = []\n\n for it_predictions in json.loads(request.data.decode('UTF-8')):\n prediction = it_predictions['score']\n for ite_clf in g_list_of_classifier:\n answer.append(ite_clf.predict(prediction))\n if answer.count(True) > answer.count(False):\n response.append({'answer' : True})\n else:\n response.append({'answer' : False})\n return json.dumps(response, indent=4)", "def classify(self, data):\n \"*** YOUR CODE HERE ***\"\n return self.sklearn_svm.predict(data)", "def predict(self, X: List[np.ndarray], **kwargs) -> List[np.ndarray]:", "def predict(self, documents):\n raise NotImplementedError()", "def predict(\r\n model: sklearn_Pipeline, \r\n test_x: pd.DataFrame\r\n) -> pd.DataFrame:\r\n # Return predictions\r\n return model.predict(test_x)", "def predict(self, data):\n\n assert self.trees is not None\n\n return np.array([self.predict_row(data[i]) for i in range(data.shape[0])])", "def predict(self, features):\n return self.search_results.predict(features)" ]
[ "0.77277684", "0.7660498", "0.76100504", "0.76100504", "0.7579166", "0.75640804", "0.75640804", "0.75640804", "0.7563839", "0.7560296", "0.74923545", "0.7473842", "0.74510586", "0.743913", "0.7410133", "0.7398052", "0.731464", "0.7291085", "0.7289969", "0.7258649", "0.723932", "0.723932", "0.723932", "0.7236595", "0.72255534", "0.72148776", "0.720783", "0.72037053", "0.71839005", "0.718335", "0.71541476", "0.71453434", "0.71435976", "0.7136906", "0.7135737", "0.71339256", "0.71333593", "0.7121172", "0.71195513", "0.7112748", "0.7082594", "0.7077601", "0.7076203", "0.70553815", "0.70502687", "0.7038814", "0.70372367", "0.7035234", "0.7034449", "0.70225465", "0.70224535", "0.70192796", "0.70162904", "0.70118994", "0.70099294", "0.70009947", "0.6999663", "0.69817364", "0.69812906", "0.6976876", "0.6976876", "0.6972663", "0.69650203", "0.69626915", "0.6962134", "0.69510996", "0.69510996", "0.69510996", "0.6949661", "0.6944207", "0.6943006", "0.69314843", "0.69300294", "0.6915104", "0.69136125", "0.68944436", "0.6887852", "0.6882847", "0.686709", "0.6865964", "0.68507886", "0.6849224", "0.6847194", "0.6832403", "0.68278295", "0.6827422", "0.68251544", "0.6824321", "0.68131095", "0.68103707", "0.6808208", "0.68031967", "0.68016356", "0.68003887", "0.6798183", "0.6798007", "0.67912936", "0.6790643", "0.67899317", "0.6782205", "0.6777451" ]
0.0
-1
Number of classes for this task
def n_classes(self): raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_num_classes(self):", "def n_classes(self):\n raise NotImplementedError", "def n_classes(self):\n raise NotImplementedError", "def num_classes(self):\n raise NotImplementedError", "def num_classes(self):\n\t\treturn 10", "def num_classes(self):\n\t\treturn len(self.classes)", "def get_number_of_classes(self):\n return len(self.class_dict.keys())", "def num_of_classes(self):\n return len(self.classes_())", "def num_of_classes(self):\n return len(self.classes_())", "def num_classes(self):\n\t\t\treturn len(self.classes)", "def num_classes(self):\n return len(self.classes)", "def numberOfClasses(self):\n classes = self.classesAndFrames()\n return len(classes.keys())", "def num_classes(self):\n return self._num_classes", "def get_num_classes(self):\n return len(self.class_map_dict)", "def num_classes():\n return NUM_CLASSES", "def num_class(self):\r\n return self._num_class", "def num_classes_a(self):\r\n return self._num_classes_a", "def __len__(self) -> int:\n\n length = self.n_classes * 100\n\n return length", "def class_size(self):\n\t\tif self.subject.count()==0:\n\t\t\treturn student.objects.all().filter(reg=self).count()\n\t\telse:\n\t\t\treturn self.grade_set.all().distinct().count()", "def count(self, cls=None):\n return len(self.all(cls))", "def do_count(self, *args):\n count = 0\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n else:\n ''' Get a list of specified instances '''\n for key, obj in storage.all().items():\n key = key.split('.')\n if key[0] == args[0]:\n count += 1\n print(count)", "def n_tasks(self) -> int:\n pass", "def count(self, args):\n counter = 0\n lists = args.split()\n\n if lists[0] not in HBNBCommand.class_check:\n print(\"** class doesn't exist **\")\n return\n\n objects = storage.all()\n for key in objects:\n name = key.split('.')\n if name[0] == lists[0]:\n counter += 1\n print(counter)", "def num_classes(self) -> int:\n y = self.data.y\n if y is None:\n return 0\n elif y.numel() == y.size(0) and not torch.is_floating_point(y):\n return int(self.data.y.max()) + 1\n elif y.numel() == y.size(0) and torch.is_floating_point(y):\n return torch.unique(y).numel()\n else:\n return self.data.y.size(-1)", "def get_n_classes(json_path: str):\n with open(json_path) as json_file:\n n_classes = len(json.load(json_file))\n return n_classes", "def countObjects(self, classType):\n count = 0\n for dobj in self.doId2do.values():\n if isinstance(dobj, classType):\n count += 1\n return count", "def classes(self):\n raise NotImplementedError(\"Please implement this yourself.\")", "def get_number_of_models():\n return 8", "def count(self, class_name, stored_objects):\n count = 0\n for k in stored_objects:\n inst_list = k.split('.')\n if inst_list[0] == class_name:\n count += 1\n print(count)", "def count(self, cls=None):\n total = 0\n if type(cls) == str and cls in classes:\n cls = classes[cls]\n total = self.__session.query(cls).count()\n elif cls is None:\n for cls in classes.values():\n total += self.__session.query(cls).count()\n return total", "def num_tasks(self) -> int:\n return 1", "def DSC(self):\n return len(self.user_defined_classes)", "def init_counts_for(self, cls):\n counter = self._counts[cls] = defaultdict(int)\n for task in self.iter_tasks(cls):\n counter['total'] += 1\n state = task.execution.state\n counter[state] += 1\n if state == Run.State.TERMINATED:\n if task.execution.returncode == 0:\n 
counter['ok'] += 1\n else:\n counter['failed'] += 1\n if counter[Run.State.TERMINATED] > 0:\n warn(\"The Engine class will forget TERMINATED tasks in the near future.\"\n \"In order to get correct results, `init_counts_for`\"\n \" should be called before any task reaches TERMINATED state\",\n FutureWarning)", "def num_tasks(self):\n return self.num_labels", "def num_train_instances(self):\n raise NotImplementedError()", "def ntasks(self):\n return len(self.tasks)", "def do_count(self, arg):\n arg_list = arg.split(\" \") if type(arg) == str else arg\n if not arg:\n print(\"** class name missing **\")\n return\n if arg_list[0] not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n return\n objs = [key for key in map(lambda x: x.split(\".\")[0],\n storage.all().keys())]\n print(objs.count(arg_list[0]))", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def num_classes(self,dd=\"\"):\n\t\tif dd==\"\":\n\t\t\tdd=datadrop.objects.all().filter(cohort=self.cohort)\\\n\t\t\t\t.order_by('-date')[0]\n\t\telif isinstance(dd,str):\n\t\t\tdd=datadrop.objects.get(name=dd,cohort=self.cohort)\n\t\treturn self.classgroup_set.all().count()", "def __number_of_jobs__(self):\n # | - __number_of_jobs__\n num_jobs = 0\n\n # Regular jobs\n if self.job_var_lst is not None:\n num_jobs = len(self.job_var_lst)\n\n # Individual dir jobs\n if self.indiv_dir_lst is not None:\n num_jobs += len(self.indiv_dir_lst)\n\n\n return(num_jobs)\n # __|", "def count(self):\n\n raise NotImplementedError", "def num_classes_b(self):\r\n return self._num_classes_b", "def N(self) -> int:\n n_types = len(self)\n return n_types", "def get_num_classes(hparams):\n num_classes_map = {\n 'imagenet': 1000,\n 'cifar10': 10,\n }\n if hparams.input_data.input_fn not in num_classes_map:\n raise ValueError(\n f'Unknown number of classes for input_fn {hparams.input_data.input_fn}')\n return num_classes_map[hparams.input_data.input_fn]", "def class_size(self):\n if not self.is_mutation_finite():\n return infinity\n else:\n components = []\n multiplicities = []\n for x in self.irreducible_components():\n if components.count(x) == 0:\n components.append(x)\n multiplicities.append(1)\n else:\n y = components.index(x)\n multiplicities[y] = multiplicities[y]+1\n\n sizes = [ x.class_size() for x in components ]\n if NotImplemented in sizes:\n print(\"Size unknown\")\n return NotImplemented\n else:\n return prod( [binomial(sizes[i]+multiplicities[i]-1,\n multiplicities[i] ) for i in range (0,len(sizes))])", "def num(self, cls):\n try:\n return self.classes.index(cls) + 1\n except:\n raise Exception(\"Someone asked for \" + str(cls) + \", which is not here \" + str(self))", "def count_tasks(self):\n return len(self.tasks)", "def class_callcount(self):\n # timing is stored by node, we compute timing by class on demand\n rval = {}\n for (fgraph, node), count in self.apply_callcount.items():\n typ = type(node.op)\n rval.setdefault(typ, 0)\n rval[typ] += count\n return rval", "def get_classes(self):\n return list(range(self.num_clss))", "def class_callcount(self):\r\n # timing is stored by node, we compute timing by class on demand\r\n rval = {}\r\n for node, count in self.apply_callcount.items():\r\n typ = type(node.op)\r\n rval.setdefault(typ, 0)\r\n rval[typ] += count\r\n return rval", "def num_tasks(self) -> int:\n return len(self.targets)", "def count_classes(self, index=None):\n \n if index is None:\n index = np.arange(self.Samples.shape[0])\n elif isinstance(index, int):\n index 
= [index]\n \n count = np.zeros((len(index), len(self._classes)), dtype=np.int)\n for _ind in range(len(index)):\n rois = self.__getrois__(index[_ind])\n count[_ind, :] = np.bincount(rois[:,4].astype(np.int), \n minlength=len(self._classes))\n \n return count", "def count(self):\n # TODO not implemented yet\n return 0", "def getNumThreads(cls) -> int:\n return cls.NUMTHREADS", "def count():", "def do_count(self, line):\n try:\n tokens = split(line)\n except ValueError:\n return None\n objects = models.storage.all()\n if len(tokens) < 1:\n print(\"** class name missing **\")\n else:\n cls = models.getmodel(tokens[0])\n if cls is None:\n print(\"** class doesn't exist **\")\n else:\n matches = 0\n for obj in objects.values():\n if type(obj) is cls:\n matches += 1\n print(matches)", "def classesAndFrames(self):\n classes = defaultdict(int)\n with open(self.inputfile) as fin:\n for line in fin:\n arr = line.strip().split()\n y = int(arr[1])\n classes[y] += 1\n return classes", "def number_of_iterations(self) -> int:\n pass", "def nb_objects(self) -> int:\n return 0", "def count_class(srcfile, listfile):\n cls_list = []\n\n # open the list file\n with open(listfile, 'r') as f:\n lines = f.readlines()\n\n # check each file in the list\n for line in lines:\n xml_file = srcfile.format(line.strip())\n\n tree = ET.parse(xml_file)\n\n # objs is all the objects in the xml\n objs = tree.findall('object')\n\n # find the class name in the object, and add it to the cls list\n for ix, obj in enumerate(objs):\n cls = str(obj.find('name').text)\n cls_list.append(cls)\n\n # find the keys and sort, count the number of boxes of the keys\n if len(cls_list) > 0:\n cls_list.sort()\n import numpy as np\n cls_arr = np.array(cls_list)\n cls1 = list(set(cls_list))\n print('unsort classes is:', cls1)\n cls1.sort()\n print('sorted classes is:', cls1)\n classes = np.unique(cls_arr)\n print('the class number is:', classes.shape[0])\n print('----------------------------')\n print('the number of each class:')\n for i in range(0, classes.shape[0]):\n # print(classes[i], cls_list.count(classes[i]))\n print(classes[i], ':', np.where(cls_arr==classes[i])[0].shape[0])\n print('----------------------------')\n\n print('the number of all the boxes is:', len(cls_list))\n return cls_list", "def __len__(self):\n try:\n return self.number_tasks\n except AttributeError:\n return 0", "def get_num_objects(cls):\n return cls.mum_objects", "def get_num_classes(dataset: str):\n if dataset == \"imagenet\" or dataset == \"kitti\":\n return 1000\n elif dataset == \"cifar10\" or dataset == \"mnist\" or dataset == \"fashion_mnist\":\n return 10", "def num_training_examples(self):", "def get_number_of_classes(model_config):\n meta_architecture = model_config.WhichOneof(\"model\")\n meta_architecture_config = getattr(model_config, meta_architecture)\n\n if hasattr(meta_architecture_config, \"num_classes\"):\n return meta_architecture_config.num_classes\n else:\n raise ValueError(\"{} does not have num_classes.\".format(meta_architecture))", "def how_many(cls):\n #cls.population equivalent to Robot.population\n print(\"We have {:d} robots.\".format(cls.population))", "def get_num_classes(df):\n classes = df.groupby('class_label')\n return classes.ngroups", "def number_objects():\n classes = [Amenity, City, Place, Review, State, User]\n names = [\"amenities\", \"cities\", \"places\", \"reviews\", \"states\", \"users\"]\n\n num_objs = {}\n for i in range(len(classes)):\n num_objs[names[i]] = storage.count(classes[i])\n\n return jsonify(num_objs)", 
"def class_nodes(self):\r\n # timing is stored by node, we compute timing by class on demand\r\n rval = {}\r\n for node, count in self.apply_callcount.items():\r\n typ = type(node.op)\r\n rval.setdefault(typ, 0)\r\n rval[typ] += 1\r\n return rval", "def num_trials(self):", "def class_nodes(self):\n # timing is stored by node, we compute timing by class on demand\n rval = {}\n for (fgraph, node), count in self.apply_callcount.items():\n typ = type(node.op)\n rval.setdefault(typ, 0)\n rval[typ] += 1\n return rval", "def number_of_nodes(self, ntype: str = None) -> int:\n return self.num_nodes(ntype)", "def count(self):\n return int()", "def count(self):\n return int()", "def MOA_class_level(self, class_entity: und.Ent):\n counter = 0\n for ref in class_entity.refs(\"Define, Typed, Set, Create\", \"Java Variable, Parameter\"):\n if ref.ent().type() in self.user_defined_classes:\n counter += 1\n filter_ = \"Method ~Unknown ~Jar ~Library ~Constructor ~Implicit ~Lambda ~External\"\n for ref in class_entity.refs(\"Define, Typed, Set, Create\", filter_):\n for ref2 in ref.ent().refs(\"Define, Typed, Set, Create\", \"Java Variable ~Unknown, Java Parameter\"):\n if ref2.ent().type() in self.user_defined_classes:\n counter += 1\n\n return counter", "def tally(self):\n return self.count", "def num_carns(self):\n return self._num_carns", "def computeNumClass(self):\n # Get the number of data\n n = len(self.data)\n # For IQR\n # First, compute the position of the first and third quartile\n fQPos = ( (n - 1) / 4 ) + 1\n tQPos = ( (3 * (n - 1)) / 4 ) + 1\n # Get the quartiles\n firstQ = 0.0\n thirdQ = 0.0\n if fQPos == round(fQPos):\n firstQ = self.data[int(fQPos)]\n else:\n up = round(fQPos)\n firstQ = self.data[up - 1] + ((self.data[up] - self.data[up - 1]) / 4.0)\n if tQPos == round(tQPos):\n thirdQ = self.data[int(tQPos)]\n else:\n up = round(tQPos)\n thirdQ = self.data[up - 1] + (3 * (self.data[up] - self.data[up - 1]) / 4.0)\n # Compute the IQR\n IQR = thirdQ - firstQ\n # Compute the number of classes and its length\n self.numBins = int(2 * IQR * m.pow(n, -1/3))\n self.computeBinWidth()", "def get_num_instances(self):\n return len( self.get_instances_ids() )", "def _class_count(objects):\n\n totals = {}\n for obj in objects:\n try:\n cls = obj.__class__\n except AttributeError:\n cls = type(obj)\n name = \"%s.%s\" % (cls.__module__, cls.__name__)\n try:\n totals[name].append(obj)\n except KeyError:\n totals[name] = [obj]\n\n totals = totals.items()\n totals.sort(lambda a,b: cmp(len(a[1]),len(b[1])))\n totals = totals[-20:] # Is this a reasonable filter?\n return totals", "def nworkers(self):\n return len(self._workers)", "def count_target_class_data(data, target_class):\n count = 0\n for row in data:\n if row[0] == target_class:\n count += 1\n\n return count", "def num_partitions(self): # -> None:\n ...", "def class_num(self) -> int:\n return int(np.argmax(self.class_scores))", "def _n_workers(self, processes: int = 2) -> int:\n if 2 <= processes <= cpu_count():\n n_workers = processes\n else:\n n_workers = cpu_count()\n return n_workers", "def __len__(self):\n return self.nb_iterations", "def how_many(cls):\n print(\"We have {:d} robots.\".format(cls.population))", "def how_many(cls):\n print(\"We have {:d} robots.\".format(cls.population))", "def get_class_count(df):\r\n \r\n return df[\"class\"].value_counts()", "def num_partitions(self): # -> int:\n ...", "def count(self) -> int:\n return self.__count", "def calcNumberOfMajorityClassRows(self, data, structure):\n maxCount, classIndex = 0, 
structure['class']['index']\n for value in structure['class']['values']:\n newData = list(filter(lambda y: y[classIndex] == value, data))\n if len(newData) >= maxCount:\n maxCount = len(newData)\n return maxCount", "def count(self):\n return self.size()", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def __len__(self) -> int:\n return len(self._tasks)", "def __len__(self) -> int:\n return len(self._tasks)", "def __len__(cls) -> int:\n return len(filter(lambda x: isinstance(x, Constant)), cls.__dict__.values())" ]
[ "0.85922784", "0.8439114", "0.8439114", "0.8354348", "0.8313837", "0.8283035", "0.8272501", "0.82022357", "0.82022357", "0.8197399", "0.8057032", "0.79937804", "0.7968803", "0.7952655", "0.7936278", "0.75892717", "0.74640423", "0.72919357", "0.72601396", "0.7232733", "0.7190453", "0.7140159", "0.6885804", "0.68403864", "0.6829295", "0.68090045", "0.6806945", "0.67990243", "0.67753345", "0.67683077", "0.67523897", "0.6749389", "0.67475075", "0.67433846", "0.6725483", "0.6718023", "0.66765785", "0.6665592", "0.6665592", "0.6665592", "0.6665592", "0.6656789", "0.66408", "0.6623043", "0.6595685", "0.6589907", "0.6586414", "0.65643185", "0.6547038", "0.6543584", "0.6541584", "0.65298396", "0.6526941", "0.64924645", "0.64899", "0.6477896", "0.64569795", "0.6456831", "0.644761", "0.6447036", "0.6440761", "0.6440372", "0.6433031", "0.64321774", "0.6386725", "0.63849205", "0.6367675", "0.63446623", "0.63400453", "0.6335263", "0.63291585", "0.6328193", "0.6327937", "0.62998503", "0.6298612", "0.6293579", "0.6293579", "0.6289607", "0.6271854", "0.62663716", "0.62628484", "0.62604475", "0.62565154", "0.6255355", "0.6247297", "0.6243788", "0.623093", "0.62252766", "0.621824", "0.62116843", "0.62116843", "0.620638", "0.6191379", "0.6186288", "0.61725473", "0.61647654", "0.61580575", "0.6157655", "0.6157655", "0.6154896" ]
0.8404323
3
Shape of the input for this task
def input_size(self): raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def input_shape(self) ->torch.Size:\n pass", "def inputShape(self):\n return self.input_shape", "def input_shape(self):\n return [None, 32, 32, 1]", "def input_shape(self):\n return [None, 32, 32, 1]", "def input_shape(self):\n return [None, 32, 32, 1]", "def input_shape(self):\n return self._ipt_shape", "def get_input_shape(self):\n return self.network.inputs[self.input_blob].shape", "def n_inputs(self):", "def get_input_shape(self):\n return self.__x.shape", "def input_shape(self) ->torch.Size:\n if self._encoding_size is None:\n return torch.Size([self._max_sequence_length])\n else:\n return torch.Size([self._max_sequence_length, self._encoding_size])", "def inputs(self):\n pass", "def get_input_shape(self):\n\n fname = self.train_database[0]\n if self.mapfly:\n feature, _ = self.map_one_molecule(fname)\n else:\n feature, _ = self.load_one_molecule(fname)\n\n self.data_shape = feature.shape\n\n if self.pair_chain_feature:\n feature = self.make_feature_pair(\n feature, self.pair_chain_feature)\n\n if self.transform:\n feature = self.convert2d(feature, self.proj2D)\n\n self.input_shape = feature.shape", "def get_input_tensor_shape(self):\n return self._engine.get_input_tensor_shape()", "def get_input_dim(self) -> int:\n raise NotImplementedError", "def output_shape(self):\n raise NotImplementedError", "def compute_output_shape(self, input_shape):\r\n return input_shape", "def data_shape(self):\n raise NotImplementedError", "def data_shape(self):\n raise NotImplementedError", "def get_output_shape(self):\n return self.incoming_shape", "def get_output_shape(self):\n return self.incoming_shape", "def get_output_shape(self):\n return self.incoming_shape", "def __len__(self):\r\n\r\n return self.yInput.shape[1]", "def inputs(self):\n return NotImplementedError", "def get_args_shape(self):\n if not self.is_built:\n raise RuntimeError(\"A plan needs to be built before input shapes can be known.\")\n\n return [ph.expected_shape for ph in self.role.input_placeholders()]", "def build(self, input_shape):\n pass", "def get_output_shape(self):\n return []", "def __len__(self):\r\n return self.yInput.shape[1]", "def compute_output_shape(self,input_shape):\n return (input_shape[0][0])", "def data_shape():\n return DATA_SHAPE", "def required_input_dim(space: gym.Space, **kwargs) -> int:", "def __len__(self):\n return len(self.inputs[0])", "def input_type_shapes(self):\n return self._input_type_shapes", "def input_size(self):\n return self.env.input_size", "def shape(self):", "def shape(self):", "def __len__(self):\n return len(self.inputs)", "def shape(self):\n return self.data.shape", "def shape(self):\n return self.data.shape", "def shape(self):\n return self.data.shape", "def shape(self):\n return self.data.shape", "def shape(self):\n return self.data.shape", "def output_shape(self) ->torch.Size:\n input_shape = self.input_shape\n if self._reduce_mode in {None, 'none', 'None'}:\n return input_shape\n elif self._reduce_mode == 'concat':\n if len(input_shape) > 1:\n return input_shape[:-2] + (input_shape[-1] * input_shape[-2],)\n return input_shape\n else:\n return input_shape[1:]", "def inputs(self) -> Sequence[jnp.ndarray]:\n pass", "def get_output_shape_for(self, input_shape):\n # Extract nodes and membership\n atom_features_shape = input_shape[0]\n # membership_shape = input_shape[2]\n\n # assert (len(atom_features_shape) == 2,\n # \"GraphGather only takes 2 dimensional tensors\")\n n_feat = atom_features_shape[1]\n return self.batch_size, n_feat", "def get_sample_shape(inputs):\n return 
tuple(inputs.size())[1:]", "def get_output_shape(self):\n return self.shape", "def get_output_shape(self):\n return self.out.shape.as_list()", "def compute_output_shape(self, input_shape):\n batch_size = input_shape[0]\n sequence_length = input_shape[1]\n return (batch_size, sequence_length)", "def input(self):", "def shape_from_args(self):\n return self.args[0].shape", "def shape(self):\n return self[0].shape", "def shape_for_keras(data):\n raise NotImplementedError", "def infer_shape(self, node, input_shapes):\r\n \"\"\"TODO: Your code here\"\"\"\r\n assert len(input_shapes) == 1\r\n N, C, H, W = input_shapes[0]\r\n p_H = (H + 2 * self.padding - self.kernel_H) / self.stride + 1\r\n p_W = (W + 2 * self.padding - self.kernel_W) / self.stride + 1\r\n return (N, C, p_H, p_W)", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[0:2] + [self.n_units]]", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[0:2] + [self.n_units]]", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[0:2] + [self.n_units]]", "def shape(self):\n return self.initial_value.shape", "def shape(self):\n return None", "def shape(self):\n return self.X.shape", "def __init__(self):\n self.out = None\n self.in_shape = None\n self.work_shape = None", "def input_dim(self):\n if hasattr(self, \"_input_dim\"):\n return self._input_dim\n return self.__input_dim", "def get_input_dimension(self):\n return self.in_dim", "def shape(self):\n return self.to_array().shape", "def shape(self):\n return np.array([self.w, self.h])", "def add_input_and_output_shape(self, input_shape, output_shape):", "def build_input(self):\n n_input = tf.placeholder(tf.int32, [None, None], name='n_input')\n t_input = tf.placeholder(tf.int32, [None, None], name='t_input')\n n_target = tf.placeholder(tf.int32, [None, None], name='n_target')\n t_target = tf.placeholder(tf.int32, [None, None], name='t_target')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n return n_input, t_input, n_target, t_target, keep_prob", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, x, y, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1\n for s in self.incoming_shape[0:2] + self.incoming_shape[2:-1] + [self.n_units]]", "def shape(self):\n return self.dataset.shape", "def shape(self):\n return self.__shape", "def shape(self):\n return self.__shape", "def get_input_shape(self, name):\n return self._input_shape.get(name)", "def getInputLength(self):\n return len(self.X[0])", "def input_size(self) ->int:\n return self._cell.input_size", "def _ExtractInputShapes(inputs):\n if context.executing_eagerly():\n return array_ops.shape_n(inputs)\n sizes = []\n fully_known = True\n for x in inputs:\n input_shape = array_ops.shape(x)\n if not isinstance(input_shape,\n tensor.Tensor) or input_shape.op.type != \"Const\":\n fully_known = False\n break\n sizes.append(input_shape)\n\n if fully_known:\n return sizes\n else:\n return array_ops.shape_n(inputs)", "def shape(self):\n\n return self.data.shape", "def shape(self):\n return self._data.shape", "def 
shape(self):\n return self._data.shape", "def shape(self):\n return self._data.shape", "def __init__(self, input_size=88):\n self._input_size = input_size", "def get_input_dims(self, U):\n return U.size()[1:]", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def data_shapes(self):", "def output_shape(self) ->torch.Size:\n return self._computed_output_shape()", "def get_output_shape(self):\n # TODO: return shape without construction of graph\n return self.get_output(comp_next_seq_pos=False).get_shape().as_list()", "def get_output_shape(self):\n # TODO: return shape without construction of graph\n return self.get_output(comp_next_seq_pos=False).get_shape().as_list()", "def _input_size(self):\n return self.embedding_size + self.hidden_size", "def input_tensorspec(self):\n return self._tensorspec", "def get_output_shape(self):\n return self.incoming_shapes[0]", "def get_output_shape(self):\n return self.incoming_shapes[0]", "def get_output_shape(self):\n return self.incoming_shapes[0][:-1] + [sum([s[-1] for s in self.incoming_shapes])]", "def shape(self):\n return self.__value.shape", "def kernel_input(self):\n\t\treturn self.kernel_shape_param('I')", "def get_output_shape(self):\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[:-1]] + [self.n_units]", "def compute_output_shape(self, input_shape):\n \n assert input_shape and len(input_shape) == 2\n return input_shape[0], self.n_clusters", "def compute_output_shape(self, input_shape):\n return [\n tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[1][2])),\n tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[0][1]))\n ]", "def processed_shape(self, shape):\n return shape" ]
[ "0.7809569", "0.7646489", "0.7484749", "0.7484749", "0.7484749", "0.741397", "0.70947266", "0.69226736", "0.690702", "0.68537086", "0.6853561", "0.6770632", "0.6769456", "0.67648906", "0.66720814", "0.6651734", "0.6614571", "0.6614571", "0.6606001", "0.6606001", "0.6606001", "0.6584764", "0.65660125", "0.6542314", "0.6522548", "0.65166676", "0.6508793", "0.64842", "0.6479382", "0.64237624", "0.641908", "0.6397645", "0.63791776", "0.6359826", "0.6359826", "0.6359235", "0.63492054", "0.63492054", "0.63492054", "0.63492054", "0.63492054", "0.63285714", "0.63196945", "0.63168883", "0.6307255", "0.63028187", "0.62884545", "0.6287102", "0.6280249", "0.62743926", "0.6271533", "0.62682873", "0.6262318", "0.625721", "0.625721", "0.625721", "0.62166935", "0.6213209", "0.6211442", "0.62101126", "0.6207893", "0.6205404", "0.619582", "0.6187543", "0.6187045", "0.6185656", "0.61825144", "0.61815435", "0.6177538", "0.6177538", "0.617715", "0.61672795", "0.61649644", "0.61584127", "0.6157602", "0.61498076", "0.61498076", "0.61498076", "0.6145944", "0.6129025", "0.61225873", "0.61225873", "0.61225873", "0.61225873", "0.61225873", "0.6121563", "0.6109373", "0.61031276", "0.61031276", "0.60881954", "0.6077223", "0.60690004", "0.60690004", "0.60663164", "0.6065569", "0.6057238", "0.60493654", "0.60450256", "0.6045001", "0.6041353" ]
0.7097524
6
Training data for this task
def train_data(self): return self._train_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self, data):\n pass", "def train(self, training_data):\n pass", "def getTrainingData(self):\n raise NotImplementedError", "def train(self, trainData):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def _load_training_data(self):\n self._save_training_data()", "def prepare_data(self):\n # Set up the path\n self.path_target_train = os.path.join(self.data_dir, self.train_path_file_target + \".pkl\")\n self.path_target_test = os.path.join(self.data_dir, self.test_path_file_target + \".pkl\")\n\n if not os.path.exists(self.path_target_train) or not os.path.exists(self.path_target_test):\n # Create vocabularies of the appropriate sizes.\n self.create_vocabulary(self.train_path_file)\n\n # Create token ids for the training data.\n input_train_path = self.train_path_file\n target_train_path = self.train_path_file_target\n train_input, train_input_length, train_labels = self.data_to_token_ids(input_train_path, target_train_path)\n\n # Create token ids for the validation data.\n input_test_path = self.test_path_file\n target_test_path = self.test_path_file_target\n test_input, test_input_length, _ = self.data_to_token_ids(input_test_path, target_test_path, train=False)\n\n # Collect data into a list\n training_data = [train_input, train_input_length, train_labels]\n test_data = [test_input, test_input_length]\n\n # Save all the data\n with open(self.path_target_train, 'wb') as f:\n pickle.dump(training_data,f)\n with open(self.path_target_test, 'wb') as f:\n pickle.dump(test_data, f)\n else:\n # Load data\n with open(self.path_target_train, 'rb') as f:\n training_data = pickle.load(f)\n with open(self.path_target_test, 'rb') as f:\n test_data = pickle.load(f)\n\n # Initialize vocabulary\n self.initialize_vocabulary()\n\n # Convert list into a numpy array - train data\n train_input = pd.DataFrame(training_data[0]).fillna(value=0).astype(int).values\n train_length_input = np.array(training_data[1], dtype=int)\n train_labels = np.array(training_data[2], dtype=int)\n\n # Convert list into a numpy array - test data\n test_input = pd.DataFrame(test_data[0]).fillna(value=0).astype(int).values\n test_length_input = pd.DataFrame(test_data[1]).fillna(value=0).astype(int).values\n\n # Printing maximum length\n print(\"Shape of the input training matrix {}\".format(str(train_input.shape)))\n print(\"Shape of the input test matrix {}\".format(str(test_input.shape)))\n\n # Copy the files\n self.copy_files()\n\n # Return output\n return train_input, train_length_input, train_labels, test_input, test_length_input", "def train():\n pass", "def train(self, trainfile):", "def train(self):\n raise NotImplementedError", "def train(self):\n return", "def trainData(self, X, y, NeuralNet, epochs):", "def train(self):\n self.dataGenerator.printDataStatistics()\n sE = len(self.dataGenerator.ids[\"train\"])// 32\n sV = len(self.dataGenerator.ids[\"validation\"])// 32\n self.model.fit_generator(\n generator=self.dataGenerator.trainingGenerator,\n steps_per_epoch= sE,\n epochs=2,\n validation_data=self.dataGenerator.validationGenerator,\n validation_steps=sV,\n # use_multiprocessing=True,\n # workers=2,\n )", "def train(self, training_steps=10):", "def train(self, ):\n raise NotImplementedError", "def train(self):\n\t\traise NotImplementedError", "def trainModel( self, featureTrain, classTrain):", "def train(self, batch):\n pass", "def 
fit_training_data(self):\n self.model.fit(self.X_train)", "def __init__(self):\n self.train(positivity_files, 0)\n self.train(subjectivity_files, 1)", "def training(self):\n self.model.fit(self.train_x, self.train_y)", "def train(self):\n for data_tier in self.data_tiers:\n fd = open(self.data_path + '/training_data_' + data_tier + '.json', 'r')\n self.preprocessed_data[data_tier] = json.load(fd)\n fd.close()\n tot = len(self.preprocessed_data[data_tier]['features'])\n p = int(math.ceil(tot*0.8))\n training_features = np.array(self.preprocessed_data[data_tier]['features'][:p])\n trend_training_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][:p])\n avg_training_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][:p])\n t1 = datetime.datetime.utcnow()\n self.clf_trend[data_tier].fit(training_features, trend_training_classifications)\n self.clf_avg[data_tier].fit(training_features, avg_training_classifications)\n t2 = datetime.datetime.utcnow()\n td = t2 - t1\n self.logger.info('Training %s for data tier %s took %s', self.name, data_tier, str(td))\n joblib.dump(self.clf_trend[data_tier], self.data_path + '/' + self.name + '_trend_' + data_tier + '.pkl')\n joblib.dump(self.clf_avg[data_tier], self.data_path + '/' + self.name + '_avg_' + data_tier + '.pkl')", "def train(self, num_batches: int):", "def train(self):\r\n raw_dataset = pd.read_csv(self.datafile, sep = ',', header = 0,\r\n na_values = '?', comment = '\\t',\r\n skipinitialspace = True)\r\n\r\n dataset = raw_dataset.copy()\r\n dataset.tail()\r\n\r\n # Clear unknown values\r\n dataset.isna().sum()\r\n dataset = dataset.dropna()\r\n\r\n # takes a sample of 80% of the data points\r\n train_dataset = dataset.sample(frac = 0.8, random_state = 0)\r\n test_dataset = dataset.drop(train_dataset.index)\r\n\r\n # Split features from labels for training and test datasets\r\n train_features = train_dataset.copy()\r\n test_features = test_dataset.copy()\r\n train_labels = train_features.pop('Quality')\r\n test_labels = test_features.pop('Quality')\r\n\r\n # normalize data\r\n normalizer = preprocessing.Normalization()\r\n normalizer.adapt(np.array(train_features))\r\n\r\n # builds the model\r\n def build_and_compile_model(norm):\r\n model = keras.Sequential([\r\n norm,\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(1)\r\n ])\r\n\r\n model.compile(loss='mean_absolute_error',\r\n optimizer=tf.keras.optimizers.Adam(0.001))\r\n return model\r\n\r\n deep_neural_network_model = build_and_compile_model(normalizer)\r\n\r\n history = deep_neural_network_model.fit(\r\n train_features, train_labels,\r\n validation_split=0.2,\r\n verbose=0, epochs=100)\r\n\r\n deep_neural_network_model.save('deep_neural_network_model')", "def train(self) -> Any:\n pass", "def get_data_train(self):\n return self.get_data(self.file_train, self.batch)", "def train(self):\n raise NotImplementedError()", "def _prepare_data(self):\n #TODO hardcoded values need to change\n print_info(\"Preprocessing the train data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"train\"),\n self.TRAIN_OUT_PATH)\n\n print_info(\"Preprocessing the test data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"test\"),\n self.TEST_OUT_PATH)\n\n print_info(\"Preprocessing the validation data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"val\"),\n self.VAL_OUT_PATH)", "def train(self)->None:", "def 
__train_model(self):\n for i in range(self.file_index):\n logger.info(\"Training the ALS model dataset \" + str(i))\n self.als = ALS(maxIter=5, regParam=0.01, userCol=\"UserId\", itemCol=\"GameId\", ratingCol=\"Userscore\",\n coldStartStrategy=\"drop\")\n self.model[i] = self.als.fit(self.df[i])\n logger.info(\"ALS model built!\")", "def train(self, X, y):", "def train():\n # YOUR TRAINING CODE GOES HERE", "def train(self):\n self.training = True", "def trainNet():", "def train_model(self, data:List[np.ndarray]):\n d = np.vstack(data)\n np.random.shuffle(d)\n self.regressor.fit(\n X=self.input(d),\n y=self.output(d)\n )", "def prepare_data(self):\n data = self._get_dataset(self.hparams.dataset_path)\n label_encoder = data[\"label_encoder\"]\n del data[\"label_encoder\"]\n\n click.secho(\"Building inputs and labels.\", fg=\"yellow\")\n datasets = {\n \"train\": defaultdict(list),\n \"valid\": defaultdict(list),\n \"test\": defaultdict(list),\n }\n for dataset_name, dataset in data.items():\n for sample in dataset:\n instance = self.build_input(\n self.tokenizer, sample[\"text\"], label_encoder, sample[\"label\"]\n )\n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n\n click.secho(\"Padding inputs and building tensors.\", fg=\"yellow\")\n tensor_datasets = {\"train\": [], \"valid\": [], \"test\": []}\n for dataset_name, dataset in datasets.items():\n dataset = self.pad_dataset(dataset, padding=self.tokenizer.pad_index)\n for input_name in MODEL_INPUTS:\n if input_name == \"labels\":\n tensor = torch.tensor(dataset[input_name], dtype=torch.float32)\n else:\n tensor = torch.tensor(dataset[input_name])\n tensor_datasets[dataset_name].append(tensor)\n\n self.train_dataset = TensorDataset(*tensor_datasets[\"train\"])\n self.valid_dataset = TensorDataset(*tensor_datasets[\"valid\"])\n self.test_dataset = TensorDataset(*tensor_datasets[\"test\"])\n click.secho(\n \"Train dataset (Batch, Candidates, Seq length): {}\".format(\n self.train_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Valid dataset (Batch, Candidates, Seq length): {}\".format(\n self.valid_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Test dataset (Batch, Candidates, Seq length): {}\".format(\n self.test_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )", "def train(self, data):\n \n logger('[.] 
Training with whole dataset ...')\n \n datalist = self.unpack_data(data)\n self.knn_model.fit(datatuple['features'], datatuple['labels'])", "def train(self, dataset, model_dir):\n raise NotImplementedError", "def set_training_data(self, *, inputs: Inputs) -> None:\n\t\tsuper().set_training_data(inputs=inputs)", "def generate_train_test(self):\n x, y = self.read_data()\n x_train, y_train, x_test, y_test = self.sample_data(x, y)\n self.train = (x_train, y_train)\n self.test = (x_test, y_test)", "def make_training_xy(self, data):\n pass", "def _train(self):\n self.train_acc.reset_states()\n self.val_acc.reset_states()\n self.train_loss.reset_states()\n self.val_loss.reset_states()\n\n self.train_ds.shuffle(buffer_size=1000)\n for idx, (x,y) in enumerate(self.train_ds):\n self.tf_train_step(x, y)\n\n for x,y in self.val_ds:\n self.tf_val_step(x, y)\n\n # It is important to return tf.Tensors as numpy objects.\n return {\n \"epoch\": self.iteration,\n \"loss_train\": self.train_loss.result().numpy(),\n \"loss_val\": self.val_loss.result().numpy(),\n \"acc_train\": self.train_acc.result().numpy(),\n \"acc_val\": self.val_acc.result().numpy(),\n }", "def train():\n import training\n\n # Ensure output directories exist\n os.makedirs(os.path.dirname(cfg.scaler_path), exist_ok=True)\n os.makedirs(cfg.model_path, exist_ok=True)\n os.makedirs(cfg.log_path, exist_ok=True)\n\n # Load (standardized) input data and target values\n tr_x, tr_y, _ = _load_data(cfg.training_set, is_training=True)\n val_x, val_y, _ = _load_data(cfg.validation_set)\n\n # Try to create reproducible results\n np.random.seed(cfg.initial_seed)\n\n # Save free parameters to disk\n utils.log_parameters(cfg.training, os.path.join(cfg.model_path,\n 'parameters.json'))\n\n training.train(tr_x, tr_y, val_x, val_y)", "def Train_data():\n print (\"loading train data ...\")\n time_start = time.time()\n data_root = '/media/keziwen/86AA9651AA963E1D'\n with h5py.File(join(data_root, './data/train_real2.h5')) as f:\n data_real = f['train_real'][:]\n num, nt, ny, nx = data_real.shape\n data_real = np.transpose(data_real, (0, 1, 3, 2))\n with h5py.File(join(data_root, './data/train_imag2.h5')) as f:\n data_imag = f['train_imag'][:]\n num, nt, ny, nx = data_imag.shape\n data_imag = np.transpose(data_imag, (0, 1, 3, 2))\n data = data_real+1j*data_imag\n num_train = 15000\n num_validate = 2000\n train_data = data[0:num_train]\n validate_data = data[num_train:num_train+num_validate]\n\n train_data = np.random.permutation(train_data)\n\n time_end = time.time()\n print ('dataset has been created using {}s'.format(time_end-time_start))\n return train_data, validate_data", "def get_training_data(self):\n\n # this actually never was a set\n # src_set = self.target['src'].values\n # dst_set = self.target['dst'].values\n\n # train_negative = self.get_negative_edges(src_set, dst_set, self.train_ind.shape[0]) # * self.K)\n # test_negative = self.get_negative_edges(src_set, dst_set, self.test_ind.shape[0])\n\n train_positive = self.target.iloc[self.train_edge_ind].values\n test_positive = self.target.iloc[self.test_edge_ind].values\n\n # # print(train_positive.shape, train_negative.shape, test_positive.shape, test_negative.shape)\n # print(f\"Working with {train_positive.shape[0]} positive and {train_negative.shape[0]} negative samples in the train set, {test_positive.shape[0]} and {test_negative.shape[0]} - in test set\")\n\n X_train = train_positive\n X_test = test_positive\n\n y_train = np.ones((self.train_edge_ind.shape[0],))\n y_test = 
np.ones((self.test_edge_ind.shape[0],))\n\n # X_train = np.vstack([\n # train_positive,\n # train_negative\n # ])\n\n # X_test = np.vstack([\n # test_positive,\n # test_negative\n # ])\n\n # y_train = np.concatenate([np.ones((self.train_ind.shape[0],)), np.zeros((self.train_ind.shape[0]),)]) # self.train_ind.shape[0]) * self.K\n # y_test = np.concatenate([np.ones((self.test_ind.shape[0],)), np.zeros((self.test_ind.shape[0],))])\n\n assert X_train.shape[0] == y_train.shape[0]\n assert X_test.shape[0] == y_test.shape[0]\n\n def shuffle(X, y):\n ind_shuffle = np.arange(0, X.shape[0])\n np.random.shuffle(ind_shuffle)\n return X[ind_shuffle], y[ind_shuffle]\n\n self.X_train, self.y_train = shuffle(X_train, y_train)\n self.X_test, self.y_test = shuffle(X_test, y_test)\n\n print(f\"Splitting into {self.X_train.shape[0]} train and {self.X_test.shape[0]} test samples\")\n\n # return X_train, X_test, y_train, y_test", "def preprocess_train_data(self):\r\n print(\"* Preprocessing training data.\", flush=True)\r\n prep.create_HDF_file(self.C.training_set, is_training_set=True)\r\n\r\n self.print_time_elapsed()", "def _train_model(\n self,\n dataset: DatasetEntity,\n ):\n logger.info(\"init data cfg.\")\n self._data_cfg = ConfigDict(data=ConfigDict())\n\n for cfg_key, subset in zip(\n [\"train\", \"val\", \"unlabeled\"],\n [Subset.TRAINING, Subset.VALIDATION, Subset.UNLABELED],\n ):\n subset = get_dataset(dataset, subset)\n if subset and self._data_cfg is not None:\n self._data_cfg.data[cfg_key] = ConfigDict(\n otx_dataset=subset,\n labels=self._labels,\n )\n\n self._is_training = True\n\n self._init_task()\n\n cfg = self.configure(True, None)\n logger.info(\"train!\")\n\n timestamp = time.strftime(\"%Y%m%d_%H%M%S\", time.localtime())\n\n # Environment\n logger.info(f\"cfg.gpu_ids = {cfg.gpu_ids}, distributed = {cfg.distributed}\")\n env_info_dict = collect_env()\n env_info = \"\\n\".join([(f\"{k}: {v}\") for k, v in env_info_dict.items()])\n dash_line = \"-\" * 60 + \"\\n\"\n logger.info(f\"Environment info:\\n{dash_line}{env_info}\\n{dash_line}\")\n\n # Data\n datasets = [build_dataset(cfg.data.train)]\n\n if self._train_type == TrainType.Semisupervised:\n # forward the knowledge of num iters per epoch to model for filter loss\n bs_per_gpu = cfg.data.train_dataloader[\"samples_per_gpu\"]\n actual_bs = bs_per_gpu * torch.distributed.get_world_size() if cfg.distributed else bs_per_gpu\n cfg.model.num_iters_per_epoch = math.ceil(len(datasets[0]) / actual_bs)\n\n # FIXME: Currently segmentor does not support multi batch evaluation.\n # For the Self-SL case, there is no val data. 
So, need to check the\n\n if \"val\" in cfg.data and \"val_dataloader\" in cfg.data:\n cfg.data.val_dataloader[\"samples_per_gpu\"] = 1\n\n # Target classes\n if \"task_adapt\" in cfg:\n target_classes = cfg.task_adapt.final\n else:\n target_classes = datasets[0].CLASSES\n\n # Metadata\n meta = dict()\n meta[\"env_info\"] = env_info\n meta[\"seed\"] = cfg.seed\n meta[\"exp_name\"] = cfg.work_dir\n if cfg.checkpoint_config is not None:\n cfg.checkpoint_config.meta = dict(\n mmseg_version=__version__ + get_git_hash()[:7],\n CLASSES=target_classes,\n )\n\n # Model\n model = self.build_model(cfg, fp16=cfg.get(\"fp16\", False), is_training=self._is_training)\n model.train()\n model.CLASSES = target_classes\n\n if cfg.distributed:\n convert_sync_batchnorm(model)\n\n validate = bool(cfg.data.get(\"val\", None))\n\n if self._hyperparams.learning_parameters.auto_adapt_batch_size != BatchSizeAdaptType.NONE:\n train_func = partial(train_segmentor, meta=deepcopy(meta), model=deepcopy(model), distributed=False)\n adapt_batch_size(\n train_func,\n cfg,\n datasets,\n isinstance(self, NNCFBaseTask), # nncf needs eval hooks\n not_increase=(self._hyperparams.learning_parameters.auto_adapt_batch_size == BatchSizeAdaptType.SAFE),\n )\n\n train_segmentor(\n model,\n datasets,\n cfg,\n distributed=cfg.distributed,\n validate=validate,\n timestamp=timestamp,\n meta=meta,\n )\n\n # Save outputs\n output_ckpt_path = os.path.join(cfg.work_dir, \"latest.pth\")\n best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, \"best_mDice_*.pth\"))\n if len(best_ckpt_path) > 0:\n output_ckpt_path = best_ckpt_path[0]\n best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, \"best_mIoU_*.pth\"))\n if len(best_ckpt_path) > 0:\n output_ckpt_path = best_ckpt_path[0]\n return dict(\n final_ckpt=output_ckpt_path,\n )", "def train(self, batch_training=False):\n raise NotImplementedError", "def train(self, x_train, y_train, x_val, y_val):\n pass", "def train(self, X, y):\n pass", "def _train(self):\n\n batch = random.sample(self.D, min(self.batch_size, len(self.D)))\n no_state = np.zeros(self.stateCnt)\n\n states = [ o[0] for o in batch]\n states_ = [ (no_state if o[3] is None else o[3]) for o in batch ]\n\n p = []\n p_ = []\n for ii in range(len(batch)):\n p.append(self._predict(states[ii][:,:,:]))\n p_.append(self._predict(states_[ii][:,:,:]))\n\n batchLen = len(batch)\n\n x = np.zeros((batchLen, 84, 84, 1))\n y =np.zeros((batchLen, 11,11,6))\n\n for i in range(batchLen):\n o = batch[i]\n s = o[0]; a = o[1]; r = o[2]; s_ = o[3]\n\n t = p[i][0,:,:,:]\n if s_ is None:\n t[a] = r\n else:\n t[a] = r + self.gamma* np.amax(p_[i])\n x[i] = s\n y[i] = t\n\n self.model.fit(x,y,nb_epoch=1,verbose=0)", "def train(self, X, y):\n self.X_train = X\n self.y_train = y", "def train(self, X, y):\n self.X_train = X\n self.y_train = y", "def ytrain(self,)->list:", "def train(self, training_data, training_labels, validation_data, validation_labels):\n abstract", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def train_data(self):\n\n return self.__train_data, self.__train_labels", "def setup(self):\n # TODO check if need both dataset together\n self.train_dataset = ABSADataset(data_path=self.train_path, mode=self.in_mode, task=self.task, \n tokenizer=self.tokenizer, vocab=\"bert\")\n self.vocabulary = self.train_dataset.vocabulary\n\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=self.vocabulary)\n #self.train_restaurant = 
ABSADataset(data_path=RESTAURANT_TRAIN)\n #self.eval_restaurant = ABSADataset(data_path=RESTAURANT_DEV)", "def train_all(X_train_fuse, Y_train, X_dev_fuse, Y_dev, R_train, R_dev, hyperparams):", "def train(self, training_data, cfg, **kwargs):\n pass", "def get_training_data() -> GraphDataset:\n _load_data_if_needed()\n return training_data", "def train(self, trainingData, trainingLabels, validationData, validationLabels): \n \n # might be useful in your code later...\n # this is a list of all features in the training set.\n self.features = list(set([ f for datum in trainingData for f in datum.keys() ]));\n \n if (self.automaticTuning):\n kgrid = [0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 20, 50]\n else:\n kgrid = [self.k]\n \n self.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, kgrid)", "def prepare_train_data(self):\r\n ## Impute rlkpis\r\n print(\"Imputing rlKPI df\")\r\n self.rlkpi.add_target_labels(1)\r\n self.rlkpi.impute_rl_kpis()\r\n\r\n print(\"Add 'met-real-station_no' & met-forecast-station_no to rl_kpis_df\")\r\n self.add_met_real_forecast_station_col_to_rlkpis()\r\n print(\"Merge 'met-real-sampled df to rl kps \")\r\n self.merge_met_real_sampled_df_to_rlkpis()\r\n\r\n ## Imputations for met-forecast\r\n print(\"Impute met-forecast\")\r\n met_forecast_obj = self.metfcast\r\n met_forecast_obj.impute_met_forecast()\r\n\r\n #Merge met forecast data to earlier merged data\r\n print(\"Merge Train data with imputed forecast df\")\r\n self.train_data = pd.merge(self.train_data,\r\n met_forecast_obj.imputed_forecast_df,\r\n on=['datetime-station_no'], indicator=True, how='inner')\r\n print(\"Check any imputation needed\", self.train_data.isna().sum().sum())\r\n self.train_data.drop(['_merge'], axis=1, inplace=True)\r\n self.perform_data_under_sampling(self.train_data)", "def data():\n\n run_type = 'standardised'\n sr = 48000\n train_perc = 0.9\n\n if sr == 48000:\n time_dimension = 282\n if sr == 44100:\n time_dimension = 259\n\n x_train, y_train, x_test, y_test = essential.compile_dataset(run_type, sr)\n\n # reshape for CNN input\n x_train = np.array([x.reshape((128, time_dimension, 1)) for x in x_train])\n x_test = np.array([x.reshape((128, time_dimension, 1)) for x in x_test])\n\n # encoded \n encoder = LabelEncoder()\n encoder.fit(y_train)\n encoder.fit(y_test)\n y_train = encoder.transform(y_train)\n y_test = encoder.transform(y_test)\n\n return x_train, y_train, x_test, y_test", "def test_training(self):\n\t\tpass", "def set_data():\r\n #if not os.path.exists(filepath):\r\n #download_data()\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, train, test = {}, {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n\r\n train['x'], train['y'] = convert_train(data['ntraindata'], data['ndim'])\r\n\r\n testdata = read(filepath + flist[-2])\r\n test['x'] = testdata['data']\r\n test['y'] = testdata['labels']\r\n\r\n data['train'], data['test'] = train, test\r\n save_pkl(data)", "def trainData(self,):\n count = 0\n while count < len(self.RAD_sequences_train):\n RAD_filename = self.RAD_sequences_train[count] \n RAD_complex = loader.readRAD(RAD_filename)\n if RAD_complex is None:\n raise ValueError(\"RAD file not found, please double check the path\")\n ### NOTE: Gloabl Normalization ###\n RAD_data = 
helper.complexTo2Channels(RAD_complex)\n RAD_data = (RAD_data - self.config_data[\"global_mean_log\"]) / \\\n self.config_data[\"global_variance_log\"]\n ### load ground truth instances ###\n gt_filename = loader.gtfileFromRADfile(RAD_filename, \\\n self.config_data[\"train_set_dir\"])\n gt_instances = loader.readRadarInstances(gt_filename)\n if gt_instances is None:\n raise ValueError(\"gt file not found, please double check the path\")\n\n ### NOTE: decode ground truth boxes to YOLO format ###\n gt_labels, has_label, raw_boxes = self.encodeToLabels(gt_instances)\n\n if has_label:\n yield (RAD_data, gt_labels, raw_boxes)\n count += 1\n if count == len(self.RAD_sequences_train) - 1:\n # np.random.seed() # should I add seed here ?\n np.random.shuffle(self.RAD_sequences_train)", "def add_training_data(self, X):\n\n raise NotImplementedError(\"not implemented!\")", "def pre_train(self, dataset, **kwargs):\n\n pass", "def train( self, trainingData, trainingLabels, validationData, validationLabels ):\n\n self.features = trainingData[0].keys() # could be useful later\n # DO NOT ZERO OUT YOUR WEIGHTS BEFORE STARTING TRAINING, OR\n # THE AUTOGRADER WILL LIKELY DEDUCT POINTS.\n for iteration in range(self.max_iterations):\n #pdb.set_trace() # esto es un break point para que puedas comprobar el formato de los datos\n print (\"Starting iteration \", iteration, \"...\")\n for i in range(len(trainingData)):#training data\n max = -10000000\n for j in range(len(self.weights)):\n prod = np.dot(self.weights[j], trainingData[i]) #este sería x0 (en la primera vuelta) (xj)\n if (prod > max):\n max=prod #en max guardamos la distancia a la instancia que más cerca está de la que estamos recorriendo\n indclase=j #guardas el índice de la clase a la que predices que pertenece\n\n if(indclase != trainingLabels[i]):\n # recalcular pesos\n self.weights[trainingLabels[i]].__radd__(trainingData[i]) #honek jarraian egiten du gehiketa pisu guztientzat\n #pdb.set_trace() # esto es un break point para que puedas comprobar el formato de los datos\n self.weights[indclase].__sub__(trainingData[i]) #honek jarraian egiten du kenketa pisu guztientzat\n\n\n\n\n\n ########################################################################################\n # 1. i es el indice de un ejemplo (un item, f(x) de un ejemplo) del conjunto de entrenamiento.\n # 2. Asi pues, en cada vuelta de este loop se trata un solo ejemplo\n # por cada ejemplo calculareis el producto punto (dotProduct) w*item\n # NOTAS: Recordad que cada ejemplo viene representado por varios rasgos (o features), es decir, es un vector de rasgos, tantos como nos marca el atributo self.features.\n # Asi cada ejemplo es de dimension 1 filas y self.features).\n # La dimension del vector w tambien es self.features, es decir, habra tantos pesos en w_rasgo dentro de w como rasgos haya en cada item de ejemplo\n # Recordad tambien que es una clasificacion multiclase en este caso. 
Hay tantas clases como nos marca el atributo self.legalLabels\n #########################################################################################", "def load_training_data(self) -> Tuple[List[np.ndarray], np.ndarray]:\n return self._load_set(config.TRAIN_DIR, True)", "def train(self, dataset) -> None:\n raise NotImplementedError()", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def load_data(self, task):\n params = self.params\n data = {splt: {} for splt in ['train', 'valid', 'test']}\n dpath = os.path.join(params.data_path, 'eval', task)\n\n self.n_sent = 1 if task in ['SST-2', 'CoLA'] else 2\n\n for splt in ['train', 'valid', 'test']:\n\n # load data and dictionary\n data1 = load_binarized(os.path.join(dpath, '%s.s1.pth' % splt), params)\n data2 = load_binarized(os.path.join(dpath, '%s.s2.pth' % splt), params) if self.n_sent == 2 else None\n data['dico'] = data.get('dico', data1['dico'])\n\n # set dictionary parameters\n set_dico_parameters(params, data, data1['dico'])\n if self.n_sent == 2:\n set_dico_parameters(params, data, data2['dico'])\n\n # create dataset\n if self.n_sent == 1:\n data[splt]['x'] = Dataset(data1['sentences'], data1['positions'], params)\n else:\n data[splt]['x'] = ParallelDataset(\n data1['sentences'], data1['positions'],\n data2['sentences'], data2['positions'],\n params\n )\n\n # load labels\n if splt != 'test' or task in ['MRPC']:\n # read labels from file\n with open(os.path.join(dpath, '%s.label' % splt), 'r') as f:\n lines = [l.rstrip() for l in f]\n # STS-B task\n if task == 'STS-B':\n assert all(0 <= float(x) <= 5 for x in lines)\n y = [float(l) for l in lines]\n # QQP\n elif task == 'QQP':\n UNK_LABEL = 0\n lab2id = {x: i for i, x in enumerate(sorted(set(lines) - set([''])))}\n y = [lab2id.get(x, UNK_LABEL) for x in lines]\n # other tasks\n else:\n lab2id = {x: i for i, x in enumerate(sorted(set(lines)))}\n y = [lab2id[x] for x in lines]\n data[splt]['y'] = torch.LongTensor(y)\n assert len(data[splt]['x']) == len(data[splt]['y'])\n\n # compute weights for weighted training\n if task != 'STS-B' and params.weighted_training:\n weights = torch.FloatTensor([\n 1.0 / (data['train']['y'] == i).sum().item()\n for i in range(len(lab2id))\n ]).npu()\n self.weights = weights / weights.sum()\n else:\n self.weights = None\n\n return data", "def eval(self):\n self.train(mode=False)", "def start_training(self):\n self.training = True", "def train(self, data):\n num_features = data[0].number_of_features\n self.init_weights(num_features)\n for iteration in range(1, self.iterations+1):\n print('iteration:', iteration)\n for example in data:\n self.update_weights(example)", "def train_model(self):\n self.best_epoch = {'auto':{}, 'coffee':{}, 'movie':{}, 'pizza':{}, 'restaurant':{}, 'uber':{} }\n self.best_f1 = {'auto':{}, 'coffee':{}, 'movie':{}, 'pizza':{}, 'restaurant':{}, 'uber':{} }\n for t in self.topic:\n if t != 'other':\n for st in self.topic2sub_topic[t].keys():\n\n print(\"Now training the classsfier for topic: \", t, \" ; intent: \", st)\n print(128 * \"=\")\n print(\"Input: str; Output: boolean(if the str contents the intent: \", st, \" ).\")\n print(64 * \"-\")\n X, y = self.get_data(t, st)\n print(\"data_loaded!\")\n X_train, X_dev, y_train, y_dev = self.my_train_test_split(X, y)\n best_f1 = 0\n for e in range(1,10):\n model = tf.keras.Sequential()\n 
model.add(tf.keras.layers.InputLayer(input_shape=[1024, ]))\n model.add(tf.keras.layers.Dense(64, activation='relu'))\n model.add(tf.keras.layers.Dense(64, activation='relu'))\n model.add(tf.keras.layers.Dense(1, activation='relu'))\n model.compile(loss='mean_squared_logarithmic_error', optimizer='adam', metrics=[metrics.mae, metrics.categorical_accuracy])\n model.fit(X_train, y_train, epochs=e, batch_size=128)\n print(\"f1_score on dev set: \")\n f1 = self.f1_score_model(model, X_dev, y_dev)[0]\n if f1 > best_f1:\n self.model_zoo[t][st] = model\n model.save_weights(self.trained_w_folder+\"/%s/%s.h5\" %(t,st))\n self.best_epoch[t][st] = e\n self.best_f1[t][st] = f1\n best_f1 = f1\n\n print(64*\"=\")\n print()", "def train_network(self):\n if self.trainData:\n if self.verbose:\n print('Started training...')\n\n for epoch in range(135):\n pass\n # save the model\n else:\n if self.verbose:\n print('No train data available')", "def _training__(self):\n self.input_size, self.output_size = self.X_train.shape[1], self.y_train.shape[1]\n w1 = np.random.uniform(size=[self.input_size, self.hidden_size])\n b = np.random.uniform(size=[1, self.hidden_size])\n H = self._activation__(np.add(np.matmul(self.X_train, w1), b))\n w2 = np.dot(np.linalg.pinv(H), self.y_train)\n self.model = {\"w1\": w1, \"b\": b, \"w2\": w2}", "def training_phase(self):\r\n self.train_dataloader = self.get_dataloader(\r\n hdf_path=self.train_h5_path,\r\n data_description=\"training set\"\r\n )\r\n self.valid_dataloader = self.get_dataloader(\r\n hdf_path=self.valid_h5_path,\r\n data_description=\"validation set\"\r\n )\r\n\r\n self.get_ts_properties()\r\n\r\n self.initialize_output_files()\r\n\r\n start_epoch, end_epoch = self.define_model_and_optimizer()\r\n\r\n print(\"* Beginning training.\", flush=True)\r\n n_processed_batches = 0\r\n for epoch in range(start_epoch, end_epoch):\r\n\r\n self.current_epoch = epoch\r\n n_processed_batches = self.train_epoch(n_processed_batches=n_processed_batches)\r\n\r\n # evaluate model every `sample_every` epochs (not every epoch)\r\n if epoch % self.C.sample_every == 0:\r\n self.evaluate_model()\r\n else:\r\n util.write_model_status(score=\"NA\") # score not computed\r\n\r\n self.print_time_elapsed()", "def build_training_data_loader(self) -> DataLoader:\n pass", "def train_one_epoch(self):\n raise NotImplementedError", "def train(self, X_train, y_train):\n self.model.fit(X_train, y_train)", "def get_train(self, data_file):\r\n return self.read_data(data_file)", "def train(self, X_train, y_train):\n\n self.model_pipeline.fit(X_train, y_train)", "def load_data(self):\n\n self._load_train_data()\n self._load_test_data()", "def train(self, trainingData, trainingLabels, validationData, validationLabels):\n self.trainingData = trainingData\n self.trainingLabels = trainingLabels", "def train(self, x_data, y_data):\n self.model.fit(np.array(x_data), np.array(y_data),\n batch_size=2,\n epochs=3,\n verbose=1)\n self.model.save_weights(self.model_filename)", "def train_step(self):\n pass", "def training_data(self):\n if self._training_data is None:\n self._load_training_data()\n if self._swapped_training_data is None:\n self._swapped_training_data = {}\n for key, value in self._training_data.items():\n self._swapped_training_data[key] = value\n return self._swapped_training_data" ]
[ "0.7985163", "0.79244554", "0.76018167", "0.7599324", "0.7536897", "0.7536897", "0.7536897", "0.7536897", "0.7536897", "0.7503888", "0.73865134", "0.73503995", "0.7335692", "0.7312381", "0.731068", "0.7301079", "0.7278077", "0.72348267", "0.72277933", "0.72013533", "0.7183543", "0.71257526", "0.70788765", "0.7077327", "0.7073348", "0.706992", "0.70519036", "0.70422435", "0.7028003", "0.7011521", "0.7002827", "0.7001773", "0.69419986", "0.6937422", "0.6920752", "0.68908906", "0.6880295", "0.68691534", "0.6857734", "0.6852145", "0.6849259", "0.6848641", "0.6834758", "0.68343705", "0.68231034", "0.6814031", "0.6807282", "0.68016064", "0.67892635", "0.6788053", "0.6787829", "0.6785602", "0.6780684", "0.67794466", "0.67770946", "0.6772022", "0.6762398", "0.6762398", "0.6762027", "0.67596966", "0.67547494", "0.67542887", "0.6735482", "0.6722831", "0.67099553", "0.6707997", "0.6705563", "0.67019445", "0.6694289", "0.66903067", "0.6681616", "0.6680564", "0.6679359", "0.6679337", "0.6671704", "0.6668308", "0.6656527", "0.6656343", "0.6656343", "0.6656343", "0.6656343", "0.6656343", "0.663884", "0.66330487", "0.66317743", "0.6623289", "0.66203696", "0.6619457", "0.66194415", "0.66192394", "0.66165715", "0.6615867", "0.66144407", "0.65996486", "0.6598514", "0.65878886", "0.6582218", "0.65767854", "0.65741104", "0.6572976" ]
0.6909976
35
Validation data for this task
def valid_data(self): return self._valid_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_dataset(self):\n pass", "def validate(cls, data, errors):", "def validate():", "def _validate(self):\n pass", "def validate(self, data):\n raise NotImplementedError(\"Inherit this class and override this method.\")", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self, task):\n raise NotImplementedError(\"must subclass and implement validate\")", "def validate(self):", "def validate(self):", "def __validate(self):\n pass", "def _data_params_validation(self) -> None:\n extra_regressor_names = set(self.params._reqd_regressor_names)\n # univariate case\n if self.data.is_univariate():\n if len(extra_regressor_names) != 0:\n msg = (\n f\"Missing data for extra regressors: {self.params._reqd_regressor_names}! \"\n \"Please include the missing regressors in `data`.\"\n )\n raise ValueError(msg)\n # multivariate case\n else:\n value_cols = set(self.data.value.columns)\n if \"y\" not in value_cols:\n msg = \"`data` should contain a column called `y` representing the responsive value.\"\n raise ValueError(msg)\n if not extra_regressor_names.issubset(value_cols):\n msg = f\"`data` should contain all columns listed in {extra_regressor_names}.\"\n raise ValueError(msg)\n # validate cap\n if (self.params.cap is True) and (\"cap\" not in self.data.value.columns):\n msg = \"`data` should contain a column called `cap` representing the cap when `cap = True`.\"\n _error_msg(msg)\n # validate floor\n if (self.params.floor is True) and (\"floor\" not in self.data.value.columns):\n msg = \"`data` should contain a column called `floor` representing the floor when `floor = True`.\"\n _error_msg(msg)", "def validate(self):\n raise NotImplementedError", "def validate(self):\n raise NotImplementedError", "def __validate():\n # TODO: implement", "def _validate_create_data(self, data):\n return", "def validate(self, data):\n try:\n if data['data_start'] > data['data_end']:\n raise serializers.ValidationError(\"finish must occur after start\")\n if data['data_start'] < timezone.now():\n raise serializers.ValidationError(\"Yesterday has already passed\")\n if data['remind']:\n if data['data_start'] - datetime.timedelta(hours=data['remind']) < timezone.now():\n raise serializers.ValidationError(\"Reminder before the date\")\n except KeyError:\n # if data['data_end'] KeyError\n return data\n return data", "def _validate_data(self, vms, fw_rules):\n self._validate_vms(vms)\n self._validate_fw_rules(fw_rules)\n self._validated = True", "def _validate(self):\n _models = {'hrrr', 'hrrrak', 'rap'}\n _fields = {'prs', 'sfc', 'nat', 'subh'}\n \n self.date = pd.to_datetime(self.date)\n \n if self.model == 'alaska':\n self.model == 'hrrrak'\n\n assert self.fxx in range(49), \"Forecast lead time `fxx` is too large\"\n assert self.model in _models, f\"`model` must be one of {_models}\"\n if self.model in ['hrrr', 'hrrrak']:\n assert self.field in _fields, f\"`field must be one of {_fields}\"\n else:\n # field is not needed for RAP model.\n self.field = ''\n \n if isinstance(self.priority, str):\n self.priority = [self.priority]\n \n self.priority = [i.lower() for i in self.priority]\n\n # Don't look for data from NOMADS if requested date is earlier\n # than yesterday. 
NOMADS doesn't keep data that old.\n if 'nomads' in self.priority:\n yesterday = datetime.utcnow() - timedelta(hours=24)\n yesterday = pd.to_datetime(f\"{yesterday:%Y-%m-%d}\")\n if self.date < yesterday:\n self.priority.remove('nomads')", "def validate(self):\n raise NotImplementedError()", "async def validate(self):\n pass", "def validate(self, data):\n\n def getattr_patched(attr_name):\n \"\"\"\n This utility function retrieves 'attr_name' from data if it is present,\n otherwise it uses the value from self.instance. This is necessary because\n data will not have entries for all the fields in the Model\n if a partial update (PATCH) is performed.\n \"\"\"\n if attr_name in data:\n return data[attr_name]\n if self.instance and hasattr(self.instance, attr_name):\n return getattr(self.instance, attr_name)\n return None\n\n step = getattr_patched(\"step\")\n user_responses = getattr_patched(\"user_responses\")\n\n has_required_inputs = bool(\n WorkflowStepUserInput.objects.filter(workflow_step=step, required=True)\n )\n\n workflow_collection_engagement = getattr_patched(\n \"workflow_collection_engagement\"\n )\n\n workflow_collection: WorkflowCollection = (\n workflow_collection_engagement.workflow_collection\n )\n\n state: EngagementStateType = workflow_collection_engagement.state\n\n # CHECK 1: Does the specified step belong to a workflow in the specified collection?\n if not workflow_collection.workflowcollectionmember_set.filter(\n workflow__workflowstep=step\n ):\n raise serializers.ValidationError(\n \"Step must belong to a workflow in the collection\"\n )\n\n \"\"\"\n CHECK 2\n Usually, the UUID of the step being submitted must be either match \n state['next']['step_id'] or state['previous']['step_id'] to prevent the user \n from getting a sort of Frankenstein engagement with messed up data.\n\n However, there are a couple of cavaets to this if the collection is \n an unordered activity.\n\n The first is that a user can start such an engagement on any workflow.\n The second is that they can move to any other workflow after completing\n a workflow.\n\n In BOTH of these scenarios the state of the engagement will have a None \n value for both state[\"next\"][\"step_id\"] and state[\"previous\"][\"step_id\"] values.\n\n We will search for that condition, and if present, allow the user to submit\n data for any step that is the first step of a collection workflow.\n \"\"\"\n if (\n workflow_collection.category == \"ACTIVITY\"\n and not workflow_collection.ordered\n and state[\"next\"][\"step_id\"] == None\n and state[\"previous\"][\"step_id\"] == None\n ):\n if WorkflowStep.objects.filter(\n workflow=step.workflow, order__lt=step.order\n ):\n raise serializers.ValidationError(\n \"Posted step must be the first step in a workflow\"\n )\n\n else:\n if step.id not in (state[\"next\"][\"step_id\"], state[\"previous\"][\"step_id\"]):\n raise serializers.ValidationError(\n \"Posted step must be next step or previous step.\"\n )\n\n \"\"\"EXAMPLE JSON PAYLOAD\n\n {\n \"detail\": \"http://localhost:8000/api/workflow_system/users/self/workflows/engagements/6dfe24d5-9e2d-4308-9c33-e878a3d378b4/details/ad4e2263-d468-4adb-9c0a-b96740ccacd1/\",\n \"workflow_collection_engagement\": \"6dfe24d5-9e2d-4308-9c33-e878a3d378b4\",\n \"step\": \"353a1aba-57fd-4183-802e-083d53863601\",\n \"user_responses\": [\n {\n \"submittedTime\": \"2021-07-26 18:33:06.731050+00:00\",\n \"inputs\": [\n {\n \"stepInputID\": \"758f482d-3eb0-4779-bf2a-bad9e452ea0e\", \n \"stepInputUIIdentifier\": \"question_1\",\n \"userInput\": 
\"Red\"\n },\n {\n \"stepInputID\": \"96e7f658-7f08-4432-b3d1-f483f01aa19b\", \n \"stepInputUIIdentifier\": \"question_2\",\n \"userInput\": false\n },\n {\n \"stepInputID\": \"2312304f-ceb3-4fea-b93f-94420060b238\", \n \"stepInputUIIdentifier\": \"question_3\",\n \"userInput\": \"hi\"\n }\n ]\n },\n {\n \"submittedTime\": \"2021-07-26 18:33:06.731050+00:00\",\n \"inputs\": [\n {\n \"stepInputID\": \"758f482d-3eb0-4779-bf2a-bad9e452ea0e\", \n \"stepInputUIIdentifier\": \"question_1\",\n \"userInput\": \"Red\"\n },\n {\n \"stepInputID\": \"96e7f658-7f08-4432-b3d1-f483f01aa19b\", \n \"stepInputUIIdentifier\": \"question_2\",\n \"userInput\": true\n },\n {\n \"stepInputID\": \"2312304f-ceb3-4fea-b93f-94420060b238\", \n \"stepInputUIIdentifier\": \"question_3\",\n \"userInput\": \"hi\"\n }\n ]\n }\n ],\n \"started\": \"2021-07-26T08:00:28-05:00\",\n \"finished\": null\n }\n\n \"\"\"\n\n # CHECK 4\n # 1: Ensure all required attributes are present for each question in the payload.\n # 2: Ensure user input data in payload corresponds to actual, defined user inputs for the step.\n # 3: Sorted user inputs for further validation in CHECK 5.\n collected_user_inputs_by_step_input_id = {}\n\n # Outer Loop: User Response Sets\n for index, user_input_set in enumerate(user_responses):\n\n # Inner Loop: Each Input in the Response Set\n for user_input in user_input_set[\"inputs\"]:\n\n # Ensure required keys are present for each input.\n try:\n step_input_id = user_input[\"stepInputID\"]\n step_input_UI_identifier = user_input[\"stepInputUIIdentifier\"]\n response = user_input[\"userInput\"]\n except KeyError as e:\n raise serializers.ValidationError(\n \"Missing key in questions entry {}\".format(e.args[0])\n )\n\n if not WorkflowStepUserInput.objects.filter(\n id=step_input_id, ui_identifier=step_input_UI_identifier\n ):\n raise serializers.ValidationError(\n f\"No step with given stepInputID {step_input_id} and stepInputUIIdentifier {step_input_UI_identifier} exists.\"\n )\n\n # Add the user input to our sorted collection for further checks.\n if step_input_id not in collected_user_inputs_by_step_input_id.keys():\n collected_user_inputs_by_step_input_id[step_input_id] = {}\n collected_user_inputs_by_step_input_id[step_input_id][\n index\n ] = user_input\n\n # CHECK 5 - Final Checks\n # Evaluate each defined WorkflowStepUserInput object for the step\n # and make sure that required answers are present and conform\n # to the specification for the object.\n for step_input in WorkflowStepUserInput.objects.filter(workflow_step=step):\n step_input_id = str(step_input.id)\n\n # Determine if the user has one or more answers for the current WorkflowStepUserInput\n if step_input_id not in collected_user_inputs_by_step_input_id:\n # No answers. Now see if answers were required.\n if step_input.required:\n raise serializers.ValidationError(\n \"A response is required, but missing, for step_input id {}\".format(\n step_input_id\n )\n )\n\n else:\n # TODO: This checking process, in general, could probably benefit\n # from a little bit of clean-up. 
This is too broad in that it will\n # handle both \"incorrect\" answers and radical schema violations in the same way.\n responses_to_input = collected_user_inputs_by_step_input_id[\n step_input_id\n ]\n for index, response in responses_to_input.items():\n try:\n jsonschema.validate(\n instance=response, schema=step_input.response_schema\n )\n except jsonschema.ValidationError:\n # This answer is not valid\n for entry in user_responses[index][\"inputs\"]:\n if step_input_id == entry[\"stepInputID\"]:\n entry[\"is_valid\"] = False\n break\n else:\n # This is!\n for entry in user_responses[index][\"inputs\"]:\n if step_input_id == entry[\"stepInputID\"]:\n entry[\"is_valid\"] = True\n break\n\n return data", "def check_validity(self):", "def validate(self):\n raise NotImplementedError('validate method not implemented.')", "def run_validation(self, data=empty):\n self._validated_data = super().run_validation(data)\n return self._validated_data", "def validate(self):\n raise NotImplementedError(\"validate function needs to be implemented for validators\")", "def validate(self):\n X_orig = make_X_from_features(self._conf)\n train_sz = len(load_array(self._conf, 'task.dataset.id_train'))\n X = X_orig[:train_sz, :]\n y = load_array(self._conf, 'task.dataset.y_train')\n y = y.reshape(y.size)\n\n cv_method_name = self._conf['task']['params']['validation']['class']\n cv_params_name = self._conf['task']['params']['validation'].get(\n 'params', {})\n cv_params_name = _to_str_value(cv_params_name)\n\n cv_method = dynamic_load(cv_method_name)\n mean_cv_score = cv_method(X, y, self, **cv_params_name)\n\n task_metrics = self._conf['task']['params']['metrics']\n task_method = task_metrics['method']\n\n ume.db.add_validation_score(\n os.path.basename(self._jn),\n ume.__version__,\n task_method,\n mean_cv_score)", "def _check_validity(self):\n pass", "def validate(self):\n ...", "def question_new_validate():", "def validate_data(self, data):\n # TODO use schema\n assert \"file_contents\" in data, data\n assert \"type\" in data, data", "def run_validation(self, data=empty):\n\n if data is not empty:\n unknown = set(data) - set(self.fields)\n if unknown:\n errors = ['Unknown field: {}'.format(f) for f in unknown]\n raise ValidationError({api_settings.NON_FIELD_ERRORS_KEY: errors})\n return super().run_validation(data)", "def validate(self, data):\n\n end_data = data.get('end') or self.instance.end\n start_data = data.get('start') or self.instance.start\n\n if end_data <= start_data:\n raise serializers.ValidationError({\n 'end': ['Data de termino deve ser maior que a data de inicio'],\n })\n\n query = Meeting.objects.filter(\n meeting_room=data.get('meeting_room') or self.instance.meeting_room.id,\n start__gte=start_data, end__lte=end_data\n )\n if self.instance:\n query = query.exclude(pk=self.instance.pk)\n\n if query.exists():\n raise serializers.ValidationError(\n 'Esta sala ja esta reservada para esse horario')\n\n return data", "def validate(self):\n pass # pylint: disable=unnecessary-pass", "def run_validation(self, data=empty):\n\n # no idea why there is no such built in feature in DRF\n if data is not empty:\n unknown = set(data) - set(self.fields)\n if unknown:\n errors = ['Unknown field: {}'.format(f) for f in unknown]\n raise ValidationError({api_settings.NON_FIELD_ERRORS_KEY: errors})\n return super().run_validation(data)", "def __validate_input(self, request_data):\n call_id = request_data.get(strings.CALL_ID_KEY)\n request_timestamp = request_data.get(strings.TIMESTAMP_KEY)\n request_start = 
request_data.get(strings.START_KEY)\n validation = None\n if call_id and request_timestamp and request_start is not None:\n call_detail_query = CallDetail.objects.filter(call_id=call_id)\n if call_detail_query:\n if len(call_detail_query) < CALL_DETAILS_LIMIT:\n stored_call_detail = call_detail_query[0]\n if isinstance(request_start, str):\n if request_start in strings.TRUE_VALUES:\n request_start = True\n else:\n request_start = False\n if stored_call_detail.start == request_start:\n validation = {strings.INPUT_ERROR_KEY:\n strings.START_END_ERROR}\n stored_timestamp = standardize_date(\n stored_call_detail.timestamp,\n strings.COMPLETE_DATE_PATTERN)\n request_timestamp = standardize_date(request_timestamp,\n strings.\n COMPLETE_DATE_PATTERN)\n if stored_timestamp == request_timestamp:\n validation = {strings.INPUT_ERROR_KEY:\n strings.EQUAL_TIMESTAMPS_ERROR}\n if stored_call_detail.start and not request_start:\n if stored_timestamp > request_timestamp:\n validation = {strings.INPUT_ERROR_KEY:\n strings.SOONER_END_ERROR}\n elif not stored_call_detail.start and request_start:\n if stored_timestamp < request_timestamp:\n validation = {strings.INPUT_ERROR_KEY:\n strings.SOONER_END_ERROR}\n else:\n validation = {strings.INPUT_ERROR_KEY:\n strings.CALL_LIMIT_ERROR}\n\n return validation", "def validate_data(self, deployment='ops'):\n self.validator.set_example(self.example)\n\n # Don't just use the built in validate_data method as this needs to be future proofed against C100 firmware\n # upgrades. This upgrade will result in a new mode SELAP (R...CNTL2MODE == 64).\n self.validator.validate_capture_file_counts()\n self.validator.validate_capture_file_waveforms()\n\n # Many of these examples will have some amount of rounding error.\n self.validator.validate_waveform_times(min_end=10.0, max_start=-1534.0, step_size=0.2)\n self.validator.validate_cavity_modes(mode=(4, 64), deployment=deployment)\n self.validator.validate_zones()", "def validate(self):\n self.filter_passing_hits()\n\n checks = {\"number of hits\":self.check_hits(),\n \"base pair count\":self.check_bp(),\n \"contig count\":self.check_contigs(),\n \"characters\": self.check_chars(),\n \"checksum\":not check_checksum(self.seqdata.checksum)}\n\n failed_checks = {(k, v) for k, v in checks.iteritems() if v is False}\n\n if failed_checks:\n \"\"\"\n replace this with logger, break would be replaced by a raised\n Exception where the Exception would be caught by the\n Sequence_Upload code\n \"\"\"\n for k, v in failed_checks:\n with open(generate_path(\"outputs/seq_errors.txt\"), \"a\") as file_:\n file_.write(\n '%s failed validation:'\n 'the %s was not valid\\n' %(self.seqdata.accession, k)\n )\n self.seqdata.valid = False\n else:\n self.seqdata.valid = True", "def validate(self, data):\n if data[\"start_time_period\"] > data[\"end_time_period\"]:\n raise serializers.ValidationError(\"End time can not be before start time\")\n elif (timezone.now() - data[\"end_time_period\"]) > timedelta(days=1):\n raise serializers.ValidationError(\"Weather can not be over 24 hours old\")\n elif data[\"end_time_period\"] > timezone.now():\n raise serializers.ValidationError(\"End time can not be in the future\")\n elif (data[\"end_time_period\"] - data[\"start_time_period\"]) >= timedelta(days=1):\n raise serializers.ValidationError(\"Only supports 1 day time frame for weather\")\n return data", "def validate(self, data):\n # if data['is_private'] and data['contestants']:\n # raise serializers.ValidationError(\"Can not be private and compete for an 
award.\")\n return data", "def valid(self):\n pass", "def is_valid(self, dataset):\n pass", "def validate(self, data):\n if data.has_key('site'):\n if FieldSightXF.objects.filter(\n xf__id=data['xf'], is_staged=False, is_scheduled=True, site=data['site']).exists():\n raise serializers.ValidationError(\"Form Already Exists, Duplicate Forms Not Allowded\")\n elif data.has_key('project'):\n if FieldSightXF.objects.filter(\n xf__id=data['xf'], is_staged=False, is_scheduled=True, project=data['project']).exists():\n raise serializers.ValidationError(\"Form Already Exists, Duplicate Forms Not Allowded\")\n return data", "def check_data_validity(X, y, query, task):\n # ADD IMPLEMENTATION HERE", "async def validate(self) -> None:\n if self.run.start_stage == PipelineStage.STT:\n if self.run.pipeline.stt_engine is None:\n raise PipelineRunValidationError(\n \"the pipeline does not support speech to text\"\n )\n if self.stt_metadata is None:\n raise PipelineRunValidationError(\n \"stt_metadata is required for speech to text\"\n )\n if self.stt_stream is None:\n raise PipelineRunValidationError(\n \"stt_stream is required for speech to text\"\n )\n elif self.run.start_stage == PipelineStage.INTENT:\n if self.intent_input is None:\n raise PipelineRunValidationError(\n \"intent_input is required for intent recognition\"\n )\n elif self.run.start_stage == PipelineStage.TTS:\n if self.tts_input is None:\n raise PipelineRunValidationError(\n \"tts_input is required for text to speech\"\n )\n if self.run.end_stage == PipelineStage.TTS:\n if self.run.pipeline.tts_engine is None:\n raise PipelineRunValidationError(\n \"the pipeline does not support text to speech\"\n )\n\n start_stage_index = PIPELINE_STAGE_ORDER.index(self.run.start_stage)\n\n prepare_tasks = []\n\n if start_stage_index <= PIPELINE_STAGE_ORDER.index(PipelineStage.STT):\n # self.stt_metadata can't be None or we'd raise above\n prepare_tasks.append(self.run.prepare_speech_to_text(self.stt_metadata)) # type: ignore[arg-type]\n\n if start_stage_index <= PIPELINE_STAGE_ORDER.index(PipelineStage.INTENT):\n prepare_tasks.append(self.run.prepare_recognize_intent())\n\n if start_stage_index <= PIPELINE_STAGE_ORDER.index(PipelineStage.TTS):\n prepare_tasks.append(self.run.prepare_text_to_speech())\n\n if prepare_tasks:\n await asyncio.gather(*prepare_tasks)", "def _validate_update_data(self, data):\n return", "def clean(self):\r\n cleaned_data = super(RPEventCreateForm, self).clean()\r\n self.check_risk()\r\n self.check_costs()\r\n self.check_location_or_plotroom()\r\n return cleaned_data", "def _validate_input(self):\n self.data.validate()\n self.meta_hybridizer.validate_input()", "def test_case_3(self):\n with open(f'{TEST_DATA_DIR}/r1.json') as file:\n data = json.load(file)\n self.assertIsInstance(data, dict)\n\n task_1 = Task.new(data=data)\n self.assertTrue(task_1.validate())\n\n with self.assertRaises(GCGValidationError):\n task_2 = Task.new(data={'data': 'bad_data'})", "def _validate(self):\n REQUIRED_KEYS = [ 'name', 'year', 'artist_id', 'genre_ids', 'sources' ]\n\n missing_keys = get_missing_keys(self.request.data, REQUIRED_KEYS)\n if len(missing_keys) > 0:\n return f\"Request body is missing the following required properties: {', '.join(missing_keys)}.\"\n\n artist_id = self.request.data['artist_id']\n\n try:\n Artist.objects.get(pk=artist_id)\n except Artist.DoesNotExist:\n return \"`artistId` supplied does not match an existing artist.\" \n\n genre_ids = self.request.data['genre_ids']\n if len(genre_ids) == 0:\n return \"You must specify 
at least one genre id in `genreIds` array.\"\n\n for genre_id in genre_ids:\n try:\n Genre.objects.get(pk=genre_id)\n except Genre.DoesNotExist:\n return f\"The genre id {genre_id} does not match an existing genre.\"\n\n sources = self.request.data['sources']\n if len(sources) == 0:\n return \"You must specify at least one source in `sources` array.\"\n\n for source in sources:\n if 'service' not in source or 'url' not in source or 'is_primary' not in source:\n return \"All sources must contain `service`, `url`, and `is_primary` properties.\"\n\n primary_sources = [ source for source in sources if source['is_primary'] == True ]\n if len(primary_sources) != 1:\n return \"There must be one and only one primary source.\"\n\n return False", "def validate(self):\n errors = []\n app = errors.append\n\n if not self.hint_cores >= self.mpi_procs * self.omp_threads >= self.min_cores:\n app(\"self.hint_cores >= mpi_procs * omp_threads >= self.min_cores not satisfied\")\n\n if self.omp_threads > self.hw.cores_per_node:\n app(\"omp_threads > hw.cores_per_node\")\n\n if self.mem_per_proc > self.hw.mem_per_node:\n app(\"mem_mb >= self.hw.mem_per_node\")\n\n if not self.max_mem_per_proc >= self.mem_per_proc >= self.min_mem_per_proc:\n app(\"self.max_mem_per_proc >= mem_mb >= self.min_mem_per_proc not satisfied\")\n\n if self.priority <= 0:\n app(\"priority must be > 0\")\n\n if not (1 <= self.min_cores <= self.hw.num_cores >= self.hint_cores):\n app(\"1 <= min_cores <= hardware num_cores >= hint_cores not satisfied\")\n\n if errors:\n raise self.Error(str(self) + \"\\n\".join(errors))", "def validate(self):\n\n raise NotImplementedError('Ando validation is not implemented yet.')", "def input_check(self):\n\n if self.species == 'He': assert self.line_model == 'voigt'\n n_upper_range, e_dens_range, temp_range, b_field_range = get_param_ranges(self.line_model)\n\n if np.isnan(n_upper_range).sum() <= 1:\n assert (self.n_upper in range(n_upper_range[0], n_upper_range[1]))\n if np.isnan(e_dens_range).sum() <= 1:\n assert (e_dens_range[0] <= self.e_dens <= e_dens_range[1])\n if np.isnan(temp_range).sum() <= 1:\n assert (temp_range[0] <= self.temp <= temp_range[1])\n if np.isnan(b_field_range).sum() <= 1:\n assert (b_field_range[0] <= self.b_field <= b_field_range[1])", "def validate_emission_input_data(self, emission):\n if emission:\n data = {}\n if emission.has_key('vehicleId'):\n data['vehicle_id'] = self.get_valid_emission_vehicle_id(emission['vehicleId'])\n else:\n raise InvalidUsage('vehicleId is an obligatory field.')\n \n if emission.has_key('vehicleType'):\n data['vehicle_type'] = self.get_valid_emission_vehicle_type(emission['vehicleType'])\n else:\n raise InvalidUsage('vehicleType is an obligatory field.')\n \n if emission.has_key('latitude'):\n data['latitude'] = self.get_valid_emission_latitude(emission['latitude'])\n else:\n raise InvalidUsage('latitude is an obligatory field.')\n \n if emission.has_key('longitude'):\n data['longitude'] = self.get_valid_emission_longitude(emission['longitude'])\n else:\n raise InvalidUsage('longitude is an obligatory field.')\n \n if emission.has_key('timestamp'):\n data['timestamp'] = self.get_valid_emission_timestamp(emission['timestamp'])\n else:\n data['timestamp'] = self.get_valid_emission_timestamp(None)\n \n if emission.has_key('heading'):\n data['heading'] = self.get_valid_emission_heading(emission['heading'])\n else:\n raise InvalidUsage('heading is an obligatory field.')\n \n return data\n \n else:\n raise InvalidUsage('emission can not be a empty 
body.')", "def validate(self,data):\n if self.context['request'].user!=data['offered_by']:\n raise serializer.ValidationError('Ride offered on behalf of others are not allowed')\n user = data['offered_by']\n circle = self.context['circle']\n \n try:\n membership=MemberShip.objects.get(\n user=user,\n circle=circle,\n is_active=True)\n except MemberShip.DoesNotExist:\n raise serializers.ValidationError('user is not an active member of circle')\n\n # La llegada tiene que ser despues de la salida, si la salida es mayor o igual a la llegada marca error\n if data['arrival_date']<=data['departure_date']:\n raise serializers.ValidationError('Departure date must happen offer arrival date')\n \n self.context['membership']=membership\n return data", "def run_validation(self, data=empty):\n (is_empty_value, data) = self.validate_empty_values(data)\n if is_empty_value:\n return data\n\n value = self.to_internal_value(data)\n try:\n self.run_validators(value)\n value = self.validate(value)\n assert value is not None, '.validate() should return the validated data'\n except ValidationError as exc:\n raise ValidationError(detail=as_serializer_error(exc))\n\n return value", "def run_validation(self, data=empty):\n (is_empty_value, data) = self.validate_empty_values(data)\n if is_empty_value:\n return data\n\n value = self.to_internal_value(data)\n try:\n self.run_validators(value)\n value = self.validate(value)\n assert value is not None, '.validate() should return the validated data'\n except ValidationError as exc:\n raise ValidationError(detail=as_serializer_error(exc))\n\n return value", "def validate(self, data):\n age = data.get(\"age\", None)\n age = age.split(\",\")\n size = data.get(\"size\", None)\n size = size.split(\",\")\n gender = data.get(\"gender\", None)\n gender = gender.split(\",\")\n for i in age:\n if i not in ['b', 'y', 'a', 's']:\n raise serializers.ValidationError(\n \"Age must be either 'b' for baby, 'y' for young,\"\n \" 'a' for adult, or 's' for senior. Can do multiple with\"\n \" commas, ex: a,y,e\")\n for i in size:\n if i not in ['s', 'm', 'l', 'xl']:\n raise serializers.ValidationError(\n \"Size must be either 's' for small, 'm' for medium, 'l' \"\n \"for large, or 'xl' for extra large. Can do multiple with\"\n \" commas, ex: s,l,xl\")\n for i in gender:\n if i not in ['m', 'f']:\n raise serializers.ValidationError(\n \"Gender must be either 'm' for male, or 'f' for female. Can\"\n \" have both using commas, ex: m,f\")\n return data", "def data_validation(self):\n print \"Starting basic data validation ...\"\n allattr = dir(bdefile)\n idx = [ii for ii, attr in enumerate(allattr) if \"validate_oee_error_\" in attr]\n vfunclist = []\n for ii in idx:\n vfunclist += [allattr[ii]]\n\n errorcodes = []\n for vfunc in vfunclist:\n errorcodes += [int(vfunc.split('_')[3])]\n\n errorcodes.sort()\n\n for code in errorcodes:\n sys.stdout.write(\"Checking validation rule %d ... 
\" % code)\n success, lines = (eval('self.validate_oee_error_'+str(code)))()\n if success:\n print \"PASSED\"\n else:\n self.report_error(code, lines)\n return False\n \n print \"Basic data validation succeeded.\\n\"\n return True", "def validate(self,data):\n offset= timezone.now()- timedelta(minutes=10)\n ride = self.context['ride']\n if ride.departure_date<= offset:\n raise serializers.ValidationError(\"You cant join this ride now\")\n\n if ride.avaible_seats< 1:\n raise serializers.ValidationError('Ride is alredy full')\n \n if ride.passangers.filter(pk=self.context['user'].pk).exists():\n raise serializers.ValidationError('Passenger is already in this trip')\n import pdb;pdb.set_trace()\n return data", "def run_parameters_validations(self):\n if self.risk_rule:\n if 'connectApi' not in self.services:\n return_error(\"You entered a risk rule but the 'connectApi' service is not chosen. \"\n \"Add the 'connectApi' service to the list or remove the risk rule.\")\n else:\n for risk_rule in self.risk_rule:\n if not is_valid_risk_rule(self, risk_rule):\n return_error(f\"The given risk rule: {risk_rule} does not exist,\"\n f\"please make sure you entered it correctly. \\n\"\n f\"To see all available risk rules run the '!rf-get-risk-rules' command.\")\n\n if self.fusion_file_path is not None:\n if 'fusion' not in self.services:\n return_error(\"You entered a fusion file path but the 'fusion' service is not chosen. \"\n \"Add the 'fusion' service to the list or remove the fusion file path.\")", "def validate(self):\n return 1", "def validate(data):\n if 'value' not in data or \\\n 'category' not in data or \\\n 'classification' not in data or \\\n 'account' not in data:\n raise Exception('Missing required field.')\n classifications = ['Personal', 'Essential', 'Savings', 'Income']\n if data['classification'] not in classifications:\n raise Exception('Invalid classification.')", "def validate(self):\n\n # validate phone number\n phone_number = self.data.get('phone_number', '')\n if not phone_number:\n self.add_error('phone_number', const.MESSAGE_FIELD_REQUIRED)\n elif not str(phone_number).isdigit():\n self.add_error('phone_number', const.MESSAGE_FIELD_INVALID_VALUE)\n elif len(str(phone_number)) > 11:\n self.add_error('phone_number', const.MESSAGE_FIELD_INVALID_LENGTH)\n\n # validate period\n month = self.data.get('month')\n year = self.data.get('year')\n if (not month and year) or (month and not year):\n self.add_error('period', const.MESSAGE_PERIOD_WRONG)\n elif month and year:\n try:\n month = int(month)\n year = int(year)\n if len(str(month)) > 2 or len(str(year)) != 4:\n self.add_error(\n 'period', const.MESSAGE_FIELD_INVALID_LENGTH\n )\n else:\n period_date = datetime.date(year, month, 1)\n last_period_date = datetime.date(*last_period(), day=1)\n if period_date > last_period_date:\n self.add_error('period', const.MESSAGE_PERIOD_INVALID)\n except ValueError:\n self.add_error('period', const.MESSAGE_FIELD_INVALID_VALUE)", "def run_validation(self, data=empty):\n # ensure Unique and UniqueTogether don't collide with a DB match\n validators = self.remove_validation_unique()\n validated_data = super().run_validation(data)\n # restore Unique or UniqueTogether\n self.restore_validation_unique(validators)\n return self.validated_data", "def test_validation(self):\n self.validationFails()", "def _validate(self):\n assert type(self.cmd) is dict\n assert self.cmd.keys() == {\"operation\",\"data\"}\n assert self.cmd[\"operation\"] == self._class_operation()", "def validate(self):\n if self.tba_key is 
None:\n self.log.error(\n \"You are missing the TBA-Key field. Please check https://github.com/team4099/scouting-data-ingest#tba for more information.\"\n )\n return False\n\n self.check_internet_connection()\n\n if self.year is None:\n self.log.error(\n \"You are missing the Year field. Please add one in the style shown below.\"\n )\n year_example = \"\"\"\n {\n \"Year\": \"2020\"\n }\n \"\"\"\n console.print(Syntax(year_example, \"json\"))\n console.print(\n \"Reference https://github.com/team4099/scouting-data-ingest#configuration for more information.\"\n )\n return False\n\n if self.google_credentials is None:\n self.log.error(\n \"You are missing the Google-Credentials field. Please check https://github.com/team4099/scouting-data-ingest#google-service-account-credentials-file for more information.\"\n )\n return False\n elif not os.path.isfile(f\"config/{self.google_credentials}\"):\n self.log.error(\n \"The file listed in the Google-Credentials field does not exist in the config folder. Please place it inside the config folder.\"\n )\n return False\n else:\n try:\n gc = gspread.service_account(f\"./config/{self.google_credentials}\")\n except ValueError as e:\n self.log.error(\n \"The file listed in the Google-Credentials Field is improper. See below for details.\"\n )\n self.log.error(e)\n return False\n\n if self.spreadsheet is None:\n self.log.error(\n \"You are missing the Spreadsheet field. Please check https://github.com/team4099/scouting-data-ingest#spreadsheet for more information.\"\n )\n return False\n else:\n try:\n gc.open(f\"{self.spreadsheet}\").get_worksheet(0)\n except gspread.exceptions.SpreadsheetNotFound:\n self.log.error(\n \"The file listed in the Spreadsheets field has not been shared with the service account. Please make sure it is.\"\n )\n return False\n\n if self.db_user is None:\n self.log.error(\n \"You are missing the Database User field. Please check https://github.com/team4099/scouting-data-ingest#mysql for more information.\"\n )\n return False\n\n if self.db_pwd is None:\n self.log.error(\n \"You are missing the Database Password field. Please check https://github.com/team4099/scouting-data-ingest#mysql for more information.\"\n )\n return False\n\n try:\n create_engine(\n f\"mysql+pymysql://{self.db_user}:{self.db_pwd}@{self.db_host}/scouting\"\n )\n except pymysql.err.OperationalError:\n self.log.error(\n \"Your Database user name and/or password is not correct. Please verify them.\"\n )\n\n if self.event is None:\n self.log.error(\n \"You are missing the Event field. Please check https://github.com/team4099/scouting-data-ingest#event for more information.\"\n )\n return False\n\n if (\n requests.get(\n f\"https://www.thebluealliance.com/api/v3/event/{self.year}{self.event}\",\n headers={\"X-TBA-Auth-Key\": self.tba_key},\n ).status_code\n == 404\n ):\n self.log.error(\n \"The event listed in the TBA-Key field is not valid. Please ensure the event key and year are correct.\"\n )\n return False\n\n if self.simulation:\n if self.simulator_url is None:\n self.log.error(\n \"You are missing the Simulator URL field. 
Please check https://github.com/team4099/scouting-data-ingest#tba for more information.\"\n )\n return False\n\n try:\n simulator_status = requests.get(\n f\"{self.simulator_url}/matches\"\n ).status_code\n except (\n ConnectionRefusedError,\n urllib3.exceptions.NewConnectionError,\n requests.exceptions.ConnectionError,\n ):\n self.log.error(\n \"The simulator may not be running or it's at a different url than the one provided.\"\n )\n return False\n\n if simulator_status == 401:\n self.log.error(\n \"The simulator may not be running. Please make sure it is and that it is up-to-date.\"\n )\n return False\n\n if self.simulator_spreadsheet is None:\n self.log.error(\n \"You are missing the Simulator Spreadsheet field. Please check https://github.com/team4099/scouting-data-ingest#spreadsheet for more information.\"\n )\n return False\n else:\n try:\n gc.open(f\"{self.simulator_spreadsheet}\").get_worksheet(0)\n except gspread.exceptions.SpreadsheetNotFound:\n self.log.error(\n \"The file listed in the Simulator Spreadsheet field has not been shared with the service account. Please make sure it is. Please also make sure the name entered is correct.\"\n )\n return False\n\n return True", "def run_validation(self, data=empty):\r\n # 验证空值\r\n (is_empty_value, data) = self.validate_empty_values(data)\r\n if is_empty_value:\r\n return data\r\n\r\n value = self.to_internal_value(data)\r\n try:\r\n self.run_validators(value)\r\n value = self.validate(value)\r\n assert value is not None, '.validate() should return the validated data'\r\n except (ValidationError, DjangoValidationError) as exc:\r\n raise ValidationError(detail=as_serializer_error(exc))\r\n\r\n return value", "def validate(self, data):\n destination = validate_branch_exists_in_city(data.get(\"destination\"))\n booking_station = validate_branch_exists_in_city(data.get(\"booking_station\"))\n if not destination:\n raise serializers.ValidationError(\n {\"errors\": {\"destination\": \"We don't have a branch in that city.\"}}\n )\n elif not booking_station:\n raise serializers.ValidationError(\n {\"errors\": {\"booking_station\": \"We don't have a branch in that city.\"}}\n )\n\n if destination.city == booking_station.city:\n raise serializers.ValidationError(\n {\"errors\": {\"destination\": \"You cannot send a parcel to the same origin.\"}}\n )\n\n data[\"destination\"] = destination\n data[\"booking_station\"] = booking_station\n\n recepient = User.objects.get_user(email=data.get(\"recepient\"))\n\n if not recepient:\n raise serializers.ValidationError(\n {\"detail\": \"There is no user registered with that email.\"}\n )\n\n data[\"recepient\"] = recepient\n\n return data", "def validate(self,data):\r\n for person in data:\r\n #check the format is a letter and 3 digit e.g A002 or a002\r\n \r\n if (re.match(r'[A-Z][0-9]{3}', (person[0]).lower())):\r\n print (person[0])\r\n else:\r\n print(person[0] + \" \" + 'is incorrect ID, '\r\n ' must contains a letter and 3 digits e.g a002')\r\n \r\n #check the format is either M/F/Male/Female\r\n \r\n if (person[1] == \"M\" or (person[1]).upper() == \"F\" or\r\n person[1] == \"Male\" or person[1] == \"Female\"):\r\n print (person[1])\r\n else:\r\n print(person[1] + \" \" + 'is incorrect Gender, '\r\n ' must either be M and Male or F and Female')\r\n \r\n #check age is valid entry and match with date\r\n \r\n if (re.match(r'[0-9]{2}', person[2]) and person[2] == self.valid_age(person[6])):\r\n print (person[2])\r\n elif (person[2] != self.valid_age(person[2])):\r\n print(\"Does not match with your 
birthday, invalid age\")\r\n else:\r\n print(person[2] + \" \" + 'age must be an integer')\r\n \r\n #check sales is 3 interger value\r\n if (re.match(r'[0-9]{3}', person[3])):\r\n return (person[3])\r\n else:\r\n print(person[3] + \" \" + 'is incorrect sales number, '\r\n 'must be a 2 interger number')\r\n \r\n #check BMI is either Normal / Overweight / Obesity or Underweight\r\n if (re.match(r'\\b(NORMAL|OVERWEIGHT|OBESITY|UNDERWEIGHT)\\b',(person[4]).upper())):\r\n print (person[4])\r\n else:\r\n print(person[4] + \" \" ' is incorrect BMI value, '\r\n 'must select from Normal, Overweight, Obesity or Underweight')\r\n \r\n #check Income is float\r\n\r\n if (re.match(r'[0-9]{2,3}', person[5])):\r\n print (person[5])\r\n else:\r\n print(person[5] + \" \" + 'is incorrect income, '\r\n 'must be a interger number') \r\n \r\n #check birthday\r\n \r\n if (self.valid_date(person[6]) and person[2] == self.valid_age(person[6]) ):\r\n print (person[6])\r\n else:\r\n print(person[2] + \" \" + 'is incorrect date format, '\r\n 'must contain DD-MM-YYYY or DD-MM-YY and seperated by -')\r\n \r\n return readFile", "def validate(self):\n\n # start validate\n self.model.eval()\n preds, labels = [], []\n for batch_idx, data in enumerate(self.valid_dataloader):\n # calculate and log losses\n losses_report, valid_preds, valid_labels = self.forward_one_batch(\n data)\n self._update_losses(losses_report, train=False)\n\n preds.append(valid_preds)\n labels.append(valid_labels)\n\n preds = np.concatenate(preds, axis=0)\n labels = np.concatenate(labels, axis=0)\n if IS_REG:\n preds = disc(preds)\n # calculate and log metrics\n metrics_report = self.evaluate_metrics(preds, labels)\n self._update_metrics(metrics_report, train=False)\n\n # TODO: lr scheduler step setting\n self.lr_scheduler.step(self.valid_loss_meters['CrossEntropyLoss'].avg)\n\n # end validate\n self.model.train()", "def validate(self, data):\n request = self.context.get('request')\n data['poster'] = request.user\n\n return validate_complete_address(data)", "def validate(self,state,data):\n if self.VALIDATE:\n raise NotImplementedError(\"must override self.validate() when self.VALIDATE is defined\")\n return None", "def clean(self):\n cleaned_data = self.cleaned_data\n startTime = cleaned_data['start_time']\n endTime = cleaned_data['end_time']\n if startTime > endTime:\n msg = _(u'The start time must occur before the end time of the event')\n self._errors['start_time'] = self.error_class([msg])\n\n return cleaned_data", "def validate(self, data):\n start = data.get('start')\n duration = data.get('duration')\n\n if start and not duration:\n raise serializers.ValidationError(\n _('If you enter start date you must enter the duration'))\n\n if not data.get('google_calendar_published'):\n return data\n\n if not start or not duration:\n raise serializers.ValidationError(\n _('To publish in Google Calendar you must enter start '\n 'time and duration'))\n data['end'] = data.get('start') + timedelta(minutes=duration)\n\n return data", "def validate(self, data: Dict):\n for key in self.__dict__.keys():\n if not key.startswith('__') and key != 'id':\n if data[key] == '' or data[key] is None:\n raise ValidationError(\n message=f'{key} should not be \"{data[key]}\"'\n )", "def validate(self, data):\n start = data.get('start_date')\n\n if start < timezone.now():\n raise serializers.ValidationError('Start Date must be a future time.')\n\n movie = Movie.objects.get(id=data['movie'].id)\n room = Room.objects.get(id=data['room'].id)\n end = start + 
dt.timedelta(minutes=movie.duration)\n overlap_start = Showtime.objects.filter(room=room.id, start_date__gte=start, start_date__lte=end).count()\n overlap_end = Showtime.objects.filter(room=room.id, end_date__gte=start, end_date__lte=end).count()\n\n if overlap_start > 0 or overlap_end > 0:\n raise serializers.ValidationError(\"There is a showtime overlapped\")\n\n data['end_date'] = end # set up the end time to the show\n data['available'] = room.capacity # initialize availability with the room's size\n return data", "def validate(self, data):\n l = len(data[\"start_times\"])\n for i in range(l):\n if data[\"start_times\"][i]>=data['end_times'][i]:\n raise serializers.ValidationError(\"Start times should come before end times\") \n return data", "def validate(self):\r\n return 1", "def validate(self):\r\n return 1", "def validate(self):\r\n return 1", "def validate(self):\r\n return 1", "def validate(self):\r\n return 1", "def test_validate(self):\n pass", "def is_valid(self, data_model: DataModel) -> bool:", "def validate(self):\n self._check_type()", "def clean(self):\n cleaned_data = super(ManageLearnersForm, self).clean()\n\n # Here we take values from `data` (and not `cleaned_data`) as we need raw values - field clean methods\n # might \"invalidate\" the value and set it to None, while all we care here is if it was provided at all or not\n email_or_username = self.data.get(self.Fields.EMAIL_OR_USERNAME, None)\n bulk_upload_csv = self.files.get(self.Fields.BULK_UPLOAD, None)\n\n if not email_or_username and not bulk_upload_csv:\n raise ValidationError(ValidationMessages.NO_FIELDS_SPECIFIED)\n\n if email_or_username and bulk_upload_csv:\n raise ValidationError(ValidationMessages.BOTH_FIELDS_SPECIFIED)\n\n if email_or_username:\n mode = self.Modes.MODE_SINGULAR\n else:\n mode = self.Modes.MODE_BULK\n\n cleaned_data[self.Fields.MODE] = mode\n cleaned_data[self.Fields.NOTIFY] = self.clean_notify()\n\n self._validate_course()\n self._validate_program()\n\n if self.data.get(self.Fields.PROGRAM, None) and self.data.get(self.Fields.COURSE, None):\n raise ValidationError(ValidationMessages.COURSE_AND_PROGRAM_ERROR)\n\n return cleaned_data", "def validate(self) -> None:\n\n if self.field not in self.model.table_fields:\n raise ValueError(f\"Value field {self.field} not present in {self.model.table}\")\n\n if self.pivot:\n if self.pivot not in self.model.table_fields:\n raise ValueError(\n f\"Pivot field {self.pivot} not present in {self.model.table}\"\n )\n\n if self.connector:\n if self.connector not in self.model.table_fields:\n raise ValueError(\n f\"Connector field {self.connector} not present in {self.model.table}\"\n )\n\n for field in self.selectors:\n if field not in self.model.table_fields:\n raise ValueError(f\"Selector field {field} not present in {self.model.table}\")", "def validate(self, data):\n\n if data['sinceWhen'] > data['tilWhen']:\n raise serializers.ValidationError(\"sinceWhen must precede after tilWhen\")\n \n return data", "def validate(self, test_data):\n if not isinstance(test_data, np.number):\n raise ValidationError('Invalid type/value.', 'numpy.number',\n type(test_data))\n if self.max_value is not None and test_data > self.max_value:\n raise ValidationError('Maximum value exceeded.',\n self.max_value, test_data)\n if self.min_value is not None and test_data < self.min_value:\n raise ValidationError('Minimum value undercut.',\n self.min_value, test_data)\n if test_data.dtype != self.dtype:\n raise ValidationError('Invalid dtype.', self.dtype,\n test_data.dtype)", 
"def test_validating_data_object(self):\n proc = Process.objects.create(\n name=\"Test process\",\n contributor=self.user,\n input_schema=[\n {\"name\": \"value\", \"type\": \"basic:integer:\", \"required\": True}\n ],\n output_schema=[\n {\"name\": \"result\", \"type\": \"basic:string:\", \"required\": True}\n ],\n )\n\n data = {\n \"name\": \"Test data\",\n \"contributor\": self.user,\n \"process\": proc,\n }\n\n with self.assertRaisesRegex(ValidationError, '\"value\" not given'):\n validate_data_object(Data.objects.create(input={}, **data))\n\n with self.assertRaisesRegex(ValidationError, \"Required fields .* not given\"):\n validate_data_object(Data.objects.create(input={}, **data))\n\n d = Data.objects.create(input={\"value\": 42}, **data)\n\n d.status = Data.STATUS_DONE\n with self.assertRaisesRegex(ValidationError, '\"result\" not given'):\n d.save()\n validate_data_object(d)\n\n d.output = {\"result\": \"forty-two\"}\n d.save()\n validate_data_object(d)", "def validate(self, sess, valid_dataset):\n return self.test(sess, valid_dataset)", "def validate(self, name, values):\r\n \r\n pass" ]
[ "0.7545616", "0.75082064", "0.7449879", "0.7382325", "0.7261222", "0.72052073", "0.72052073", "0.72052073", "0.72052073", "0.72052073", "0.72052073", "0.72052073", "0.72052073", "0.7128559", "0.71273035", "0.71273035", "0.7121654", "0.7115019", "0.70944667", "0.70944667", "0.7008111", "0.6969461", "0.696921", "0.6966596", "0.6924723", "0.6910728", "0.6901777", "0.68876654", "0.6885054", "0.6866291", "0.68274903", "0.6798002", "0.67774165", "0.67739695", "0.6760978", "0.6722126", "0.67085314", "0.6708373", "0.67049015", "0.6678909", "0.66734076", "0.6670608", "0.6653343", "0.66439205", "0.6640474", "0.6631912", "0.6564496", "0.656234", "0.65598506", "0.6541808", "0.65225214", "0.65155774", "0.6510442", "0.6494966", "0.6494888", "0.6484642", "0.6481135", "0.6462031", "0.6458726", "0.6434021", "0.6427067", "0.642658", "0.642658", "0.63928443", "0.638516", "0.6384532", "0.6384128", "0.63831997", "0.6379625", "0.63708127", "0.6361388", "0.63493234", "0.6343887", "0.6335385", "0.6334862", "0.6322333", "0.6318806", "0.63111705", "0.63042897", "0.62998927", "0.6298945", "0.6298263", "0.6295608", "0.62922025", "0.628829", "0.62821305", "0.62821305", "0.62821305", "0.62821305", "0.62821305", "0.62756705", "0.6272052", "0.62705636", "0.62670743", "0.6265639", "0.6264972", "0.62557495", "0.6253251", "0.6249771", "0.6248791" ]
0.62555313
97
Test data for this task
def test_data(self): return self._test_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTestData(self):\n raise NotImplementedError", "def test_process_data(self):\n pass", "def test_data(self, data):\n print('-'*30)\n print('Starting test: {}'.format(data['name']))\n self.set_resolution(data['resolution']['width'], data['resolution']['height'])\n self.test_actions(data['actions'])\n print('Test finished')\n print('-'*30)", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def test_data(self):\n\n return self.__valid_data, self.__valid_labels", "def _load_test_data(self):\n self._save_test_data()", "def getTestResults():", "def test_data(self):\n if self._test_data is None:\n self._load_test_data()\n if self._swapped_test_data is None:\n self._swapped_test_data = {}\n for key, value in self._test_data.items():\n self._swapped_test_data[key] = value\n return self._swapped_test_data", "def get_test_data(self, topic):\n raise NotImplementedError(\"{} must override step()\".format(self.__class__.__name__))", "def setUpTestData(cls):\n cls.board = Board.objects.create(name = DICT.get('board_name') )\n\n cls.task = Task.objects.create(head = DICT.get('task_head'),\n description = DICT.get('task_description'),\n board = cls.board )", "async def populate_test_data(self):\n async with (await self._get_connection_pool()).acquire() as conn:\n await conn.execute('delete from foglamp.tasks')\n await conn.execute('delete from foglamp.schedules')\n await conn.execute('delete from foglamp.scheduled_processes')\n await conn.execute(\n '''insert into foglamp.scheduled_processes(name, script)\n values('sleep1', '[\"python3\", \"../scripts/sleep.py\", \"1\"]')''')\n await conn.execute(\n '''insert into foglamp.scheduled_processes(name, script)\n values('sleep10', '[\"python3\", \"../scripts/sleep.py\", \"10\"]')''')\n await conn.execute(\n '''insert into foglamp.scheduled_processes(name, script)\n values('sleep30', '[\"python3\", \"../scripts/sleep.py\", \"30\"]')''')\n await conn.execute(\n '''insert into foglamp.scheduled_processes(name, script)\n values('sleep5', '[\"python3\", \"../scripts/sleep.py\", \"5\"]')''')", "def setUpTestData(cls):\n call_command('loaddata', 'db.json', verbosity=0)", "def setUp(self):\n self.dataset = get_test_dataset()", "def get_test_data():\n\n # test set\n test = pd.read_csv(\"test.csv\")\n\n return test", "def tests():", "def test_alien_data(self):", "def test_batch(self):\n pass", "def test_preprocessed_data(self):\n self.assertEqual(self.tester.preprocessed_data, [1, 2])", "def get_test_examples(self, data_path):\r\n return self.create_examples(self.read_data(data_path), 'test')", "def getTestSet(self):\r\n return self.fTestData", "def test_data_in_param(self):", "def test(self, dataset) -> None:\n raise NotImplementedError()", "def test_data_manipulation(self):\n target_name = self.project['target']['name']\n self.api_mock.return_value.get_metadata.return_value = [\n {'_id': '0',\n 'pid': '1',\n 'created': datetime.datetime.now(),\n 'name':'universe',\n 'originalName': 'credit-sample-200.csv',\n 'varTypeString': 'NN',\n 'shape': [2, 100],\n 'controls':{},\n 'columns': [[1,target_name,0],[3,\"age\",0]],\n 'files': ['projects/' + str(self.pid) + '/raw/' + self.testdatafile],\n 'typeConvert': {}},\n {'_id': '1',\n 'pid': '1',\n 'name':'test',\n 'originalName': 'credit-sample-200.csv',\n 'created': datetime.datetime.now(),\n 'varTypeString': 'NN',\n 'shape': [2, 100],\n 'controls':{},\n 'columns': 
[[1,target_name,0],[3,\"age\",0]],\n 'files': ['projects/' + str(self.pid) + '/raw/' + self.testdatafile],\n 'typeConvert': {}},\n {'_id': '2',\n 'pid': '1',\n 'name':'new',\n 'created': datetime.datetime.now(),\n 'originalName': 'credit-sample-200.csv',\n 'newdata':True,\n 'controls':{},\n 'shape': [2, 100],\n 'varTypeString': 'NN',\n 'columns': [[1,target_name,0],[3,\"age\",0]],\n 'files': ['projects/' + str(self.pid) + '/raw/' + self.testdatafile],\n 'typeConvert': {}}]\n request = WorkerRequest({'pid': '1', 'uid': '1', 'dataset_id': '1',\n 'command': 'fit', 'max_reps': 0,\n 'samplepct': 100})\n\n #target\n #this will map the target values to (0,1) because target type is Binary\n target_vector = self.dataprocessor.target_vector()\n target_series = target_vector['main']\n self.assertItemsEqual(np.unique(target_series), [0,1])\n\n #this will be none because 'holdout_pct' isn't set in the project data\n self.assertIsNone(target_vector['holdout'])\n\n #prediction dataset\n predictors = self.dataprocessor.predictors()\n pred_dataframe = predictors['1']['main']\n self.assertItemsEqual(list(pred_dataframe.columns), [\"age\"])\n self.assertEqual(self.dataprocessor.get_vartypestring_without_target('1'), \"N\")\n\n request = WorkerRequest({'pid': '1', 'uid': '1', 'dataset_id': '1', 'scoring_dataset_id': '2', 'command': 'predict', 'max_reps': 0, 'samplepct':100})\n dp2 = DataProcessor(request)\n data = dp2.request_datasets()\n self.assertEqual(data.keys(), ['1'])\n self.assertEqual(data['1'].keys(), ['scoring', 'vartypes'])\n scoring_data = data['1']['scoring']\n vartypes = data['1']['vartypes']\n self.assertEqual(list(scoring_data.columns), [\"age\"])\n self.assertEqual(vartypes, \"N\")", "def test_get_run(self):\n pass", "def runtest(self):", "def test_data():\n current_dir = os.path.dirname(os.path.abspath(__file__))\n test_data_dir = os.path.join(current_dir, \"test_data\")\n\n return pd.read_csv(os.path.join(test_data_dir, \"test_data_6m.csv\"))", "def setUp(self):\n\n self.test_data_path = 'testing/test_data/'", "def get_testing_data(self):\n\n print 'Loading testing data ', self.test_folder , '...'\n test_text = []\n cnt = 0\n\n for f in listdir(self.test_folder):\n file_path = join(self.test_folder, f)\n if isfile(file_path):\n cnt += 1\n if cnt % 10000 == 0:\n print 'finished:', cnt # line counter\n self.test_index.append(f[:-4])\n with open(file_path, 'rb') as f:\n test_text.append( f.read() )\n\n return test_text", "def test_setup(self, test_data: list=None):\n print(\"[dataset]: using test setup ...\")\n self.vocabulary = [\"empty\"]\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=\"bert\", test=True)\n return", "def setUp(self):\n\n pwd = self.get_script_path()\n self.test_drug_info_file = pwd+'/../insight_testsuite/tests/my_test/input/test_input_file.txt'\n self.test_raw_tuple= [('jordanmichael', 'A', 23.00),\n ('jameslebron', 'C', 23.10),\n ('bryantkobe', 'B', 8),\n ('bryantkobe', 'C', 24.9)]\n self.test_sorted_tuple = sorted(self.test_raw_tuple, key=operator.itemgetter(1))\n #print self.test_sorted_tuple\n self.test_dict = {'C':2, 'A':1, 'B':1}\n self.test_num_unique_name = [1, 1, 2]\n self.test_total_cost_each_drug = [23.00,8.00,48.00]\n self.test_output_file = pwd+'/../insight_testsuite/tests/my_test/output/test_output_file_1.txt'", "def setUpTestData(cls):\n # volunteer user\n common.initialize_empty_volunteer()", "def setUp(self):\n self.test_data = self.read_data('test_data/clients.txt')", "def 
test_0():\n sync.gen_multi_fake_data()#default is only one randomly selected data set\n sync.main(testing=True)", "def test(self, dataset):\n\n outputs, errors = self.use(dataset)\n\n ## PUT CODE HERE ##\n # I put the code in the \"use\" function, seems better :-)\n\n return outputs, errors", "def test_init(self):\n test_data = (\n (self.segment.input_file, self.EXPECTED_INPUT_FILE,\n \"input file = {v}\".format(v=self.EXPECTED_INPUT_FILE)),\n (self.segment.punch_in, self.EXPECTED_PUNCH_IN,\n \"punch in = {v}\".format(v=self.EXPECTED_PUNCH_IN)),\n (self.segment.punch_out, self.EXPECTED_PUNCH_OUT,\n \"punch out = {v}\".format(v=self.EXPECTED_PUNCH_OUT)),\n (self.segment.input_stream, self.EXPECTED_INPUT_STREAM,\n \"input stream = {v}\".format(v=self.EXPECTED_INPUT_STREAM)),\n (self.segment._temp_file, self.EXPECTED_TEMP_FILE,\n \"temp file = {v}\".format(v=self.EXPECTED_TEMP_FILE)),\n (self.segment._temp_suffix, self.EXPECTED_TEMP_SUFFIX,\n \"temp suffix = {v}\".format(v=self.EXPECTED_TEMP_SUFFIX)),\n (self.segment._temp_files_list, self.EXPECTED_TEMP_LIST,\n \"temp files list = {v}\".format(v=self.EXPECTED_TEMP_LIST)),\n (self.segment._TYPE, self.EXPECTED_TYPE,\n \"type = {v}\".format(v=self.EXPECTED_TYPE)),\n (self.segment._TRIM, self.EXPECTED_TRIM,\n \"trim = {v}\".format(v=self.EXPECTED_TRIM)),\n (self.segment._SETPTS, self.EXPECTED_SETPTS,\n \"setpts = {v}\".format(v=self.EXPECTED_SETPTS)),\n )\n for actual, expected, description in test_data:\n with self.subTest(msg=description):\n self.assertEqual(actual, expected)", "def fixture_example_data():\n import_example_data()", "def _test_template_data(self):\n chars=string.ascii_uppercase + string.digits\n id = ''.join(random.choice(chars) for x in range(6))\n\n return {\n 'test_module': self.test_modulename(),\n 'driver_module': self.driver_modulename(),\n 'driver_dir': self.driver_dir(),\n 'file': self.driver_relative_path(),\n 'author': self.metadata.author,\n 'driver_name': self.metadata.driver_name,\n 'constructor': self.metadata.constructor,\n 'full_instrument_lower': self.metadata.driver_name.lower(),\n 'full_instrument_camelcase': self.driver_name_camelcase(),\n }", "def test_get_results(self):\n pass", "def testData(self, ):\n count = 0\n while count < len(self.RAD_sequences_test):\n RAD_filename = self.RAD_sequences_test[count] \n RAD_complex = loader.readRAD(RAD_filename)\n if RAD_complex is None:\n raise ValueError(\"RAD file not found, please double check the path\")\n ### NOTE: Gloabl Normalization ###\n RAD_data = helper.complexTo2Channels(RAD_complex)\n RAD_data = (RAD_data - self.config_data[\"global_mean_log\"]) / \\\n self.config_data[\"global_variance_log\"]\n ### load ground truth instances ###\n gt_filename = loader.gtfileFromRADfile(RAD_filename, \\\n self.config_data[\"test_set_dir\"])\n gt_instances = loader.readRadarInstances(gt_filename)\n if gt_instances is None:\n raise ValueError(\"gt file not found, please double check the path\")\n\n ### NOTE: decode ground truth boxes to YOLO format ###\n gt_labels, has_label, raw_boxes = self.encodeToLabels(gt_instances)\n\n if has_label:\n yield (RAD_data, gt_labels, raw_boxes)\n count += 1", "def run_tests(self):\n\n self.test_report = []\n\n #dict of unsorted lists\n dict_of_un_lists = self.dict_un_lists_intersection_test(self.data_dict)\n self.test_report.append(dict_of_un_lists)\n\n #dict of sets\n dict_of_sets = self.build_dict_of_sets(self.data_dict)\n self.test_report.append(self.dict_sets_intersection_test(dict_of_sets))\n\n #pandas - experimental and probably not 
the way to use pandas\n # dict_of_pandas = self.build_dict_of_panda_series(self.data_dict)\n # self.test_report.append(self.dicts_any_intersection_node_test(dict_of_pandas))\n\n # print results\n\n if self.verbose:\n self.print_tests_results()", "def test_data():\n batch_size = 10\n input_dim = 28\n test_data = np.random.rand(batch_size, input_dim)\n\n return test_data", "def generate_test_data(self):\n self.message('Generating {} rows of unique keyed test data.'.format(self.test_data_row_count))\n if not self.open_workbooks():\n exit()\n\n # populate our data dump input files\n self.populate_sheet(self.wb_incident, self.wb_incident.active, self.fn_incident, 'Hypercare Incidents', 'INC')\n self.populate_sheet(self.wb_enhancement, self.wb_enhancement.active, self.fn_enhancement,\n 'Hypercare Enhancements', 'ENH')\n self.populate_sheet(self.wb_defect, self.wb_defect.active, self.fn_defect, 'Hypercare Defects', 'DFC')\n self.populate_sheet(self.wb_alm, self.wb_alm.active, self.fn_alm, 'ALM Defects', 'ALM')\n\n self.message('Completed generating input file')", "def test_data():\n return {\"David Andrews\" : [200.50, 400.00, 250.75],\n \"John Goodfellow\" : [25.00, 175.50],\n \"Mary Suzuki\" : [75.00, 125.00, 250.00],\n \"Bonney Lake\" : [500.50, 700.75, 500.25],\n \"DeMarcus Rollins\" : [155.00, 165.00]\n }", "def Test_data():\n print (\"loading test data ...\")\n time_start = time.time()\n data_root = '/media/keziwen/86AA9651AA963E1D'\n\n with h5py.File(join(data_root, './data/test_real2.h5')) as f:\n test_real = f['test_real'][:]\n with h5py.File(join(data_root, './data/test_imag2.h5')) as f:\n test_imag = f['test_imag'][:]\n test_real = np.transpose(test_real, (0, 1, 3, 2))\n test_imag = np.transpose(test_imag, (0, 1, 3, 2))\n test_data = test_real+1j*test_imag\n time_end = time.time()\n print ('dataset has been created using {}s'.format(time_end - time_start))\n return test_data", "def load_testing_data(self) -> List[np.ndarray]:\n input_data = self._load_set(config.TEST_DIR, False)\n return input_data", "def test(self, test_data):\n with open(test_data, 'r') as test_data:\n results = {}\n for type in self.label_type_map:\n results[self.label_type_map[type]] = []\n while True:\n tokens = test_data.readline().split()\n pos = test_data.readline().split()\n indices = test_data.readline().split()\n if not tokens or not pos or not indices:\n break\n curr_results = self.viterbi(tokens)\n intervals = self.extract_intervals(curr_results, indices)\n for type in intervals:\n for interval in intervals[type]:\n results[type].append(interval)\n self.write_results(results)", "def setUp(self):\n with open('tests/data/code.txt') as f:\n self.sf = StreamerFlowchart('Test', f.read())\n with open('tests/data/code_result.txt') as f:\n self.expected = f.read()[:-1] # don't want the empty line", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def run_test(self):\n\n # populate *_ps sets\n self.enter_project_file()\n\n # populate *_dir sets\n self.enter_directories()\n\n # The files in the directories makes up the largest possible set of files\n self.result_files = self.result_files_dir\n self.design_files = self.design_files_dir\n self.design_space_files = self.design_space_files_dir\n 
self.test_bench_files = self.test_bench_files_dir\n\n # populate *_ms sets\n self.enter_meta_results_file()\n\n # populate *_OK sets\n self.check_analysis_status()\n\n df = {'design_files_dir' : list(self.design_files_dir),'design_files_pr' : list(self.design_files_pr),\n 'design_files_ms' : list(self.design_files_ms), 'design_files_OK' : list(self.design_files_OK)}\n\n ds = {'design_space_files_dir' : list(self.design_space_files_dir),\n 'design_space_files_pr' : list(self.design_space_files_pr)}\n\n rs = {'result_files_dir' : list(self.result_files_dir), 'result_files_ms' : list(self.result_files_ms),\n 'result_files_OK' : list(self.result_files_OK)}\n\n tb = {'test_bench_files_dir' : list(self.test_bench_files_dir),\n 'test_bench_files_ms' : list(self.test_bench_files_ms)}\n\n srl = SummaryReportsLinks(self.result_files_dir)\n\n lf = {'files_linked_from_sum_reps' : srl.get_files(),\n 'folders_linked_from_sum_reps' : srl.get_folders()}\n\n # 'test_bench_files_pr' : list(self.test_bench_files_pr),\n \n json_test = {'design_files' : df, 'design_space_files' : ds, 'result_files' : rs,\n 'test_bench_files' : tb, 'stat_files' : self.stat_files,\n 'files_linked_from_sum_reps' : lf}\n\n with open('test_run.json','wb') as f_out:\n json.dump(json_test, f_out, indent=4)", "def run_test(self):\n self.output_analytics = self.run_inference()\n self.output_df = pd.DataFrame(self.output_analytics)", "def test_generate_all_testing(self):\n pass", "def test_get_scenarios(self):\n pass", "def preprocess_test_data(self):\r\n print(\"* Preprocessing test data.\", flush=True)\r\n prep.create_HDF_file(self.C.test_set)\r\n\r\n self.print_time_elapsed()", "def get_testing_data():\n \n def _get_testing_data(data_csv, platemap_csv, data_type, size, pkl_file):\n \n with open(pkl_file, 'rb') as file: # load the list with expexcted data frames from .pkl file\n expected_list = pickle.load(file)\n\n actual_output = fa.read_in_envision(data_csv = data_csv, platemap_csv=platemap_csv, data_type=data_type, size=size) # execute the tested function\n actual_g = actual_output.g_factor\n actual_list = []\n\n for repeat in actual_output.data_dict.values(): # unpack the dictionary with df from the tested function\n metadata, data = repeat.values()\n p_channel, s_channel = data.values()\n actual_list.append(metadata)\n actual_list.append(p_channel)\n actual_list.append(s_channel)\n \n return actual_list, expected_list, actual_g\n \n return _get_testing_data", "def create_test_data(self):\n fake = Faker(['en_US', 'ja_JP', 'el_GR', 'de_DE'])\n\n self.actor_request = {\n 'name': fake.name(),\n 'age': random.randint(22, 88),\n 'gender': random.choice(['M', 'F'])\n }\n\n self.movie_request = {\n 'title': fake.color_name() + ' ' + fake.street_suffix(),\n 'releaseDate': str(fake.date_between())\n }\n\n self.actor_update_request = {\n 'name': fake.name(),\n }\n\n self.movie_update_request = {\n 'title': fake.color_name() + ' ' + fake.street_suffix(),\n }\n\n for _ in range(30):\n actor_name = fake.name()\n actor_age = random.randint(22, 88)\n actor_gender = random.choice(['M', 'F'])\n\n movie_title = fake.color_name() + ' ' + fake.street_suffix()\n movie_release_date = str(fake.date_between())\n\n actor = Actor(actor_name, actor_age, actor_gender)\n actor.insert()\n\n movie = Movie(movie_title, movie_release_date)\n movie.insert()\n\n for _ in range(20):\n actors = Actor.query.all()\n movies = Movie.query.all()\n\n actor_to_update = random.choice(actors)\n movie_to_update = random.choice(movies)\n 
actor_to_update.movies.append(movie_to_update)", "def runTests(self):\n \n pass", "def setUp(self):\n\n # Create a data pipe.\n self.interpreter.pipe.create('test', 'mf')\n\n # Create a temporary file name.\n ds.tmpfile = mktemp()", "def test_training(self):\n\t\tpass", "def fetch_test_data(self):\r\n self.fetch_attribute_list()\r\n self.type_conversion()\r\n # Connect to sensor database.\r\n connection = db.Connection(host=\"localhost\", user=\"root\", db=\"sensor\")\r\n dbhandler = connection.cursor()\r\n test_data = []\r\n for i in range(0, len(self.attribute_list)):\r\n # If the value is in user_info.json file.\r\n if self.attribute_list[i] in self.form_attribute_keys == True:\r\n test_data.append(self.form_attribute[self.attribute_list[i]])\r\n else:\r\n # Value is in database.\r\n # Reconstruct attribute name to match with table column name.\r\n self.attribute_list[i] = self.attribute_list[i].replace(' ', '_')\r\n self.attribute_list[i] = self.attribute_list[i].lower()\r\n # Fetch last inserted value of that attribute.\r\n query = \"select `\"\r\n query += (self.attribute_list[i] + \"`FROM `\" + self.attribute_list[i]\r\n + \"` ORDER BY `\" + self.attribute_list[i] + \"` DESC LIMIT 1\")\r\n dbhandler.execute(query=query)\r\n value = dbhandler.fetchall()\r\n print value\r\n # Append to test_data\r\n test_data.append(value[0][0])\r\n print test_data\r\n return np.array(test_data)", "def setup(self):\n self.rows = test_helpers.fetch_sample_teradata_rows()\n self.csv_path = 'not/a/real/path'", "def setUpTestData(cls) -> None:\n\n # Define base url\n cls.url = BASE_URL + '/'\n\n # Make 9 \"normal\" authors.\n cls.authors: typing.List[Author] = [\n create_author() for _ in range(9)\n ]\n\n # Make 1 superuser author.\n cls.super_author: Author = create_author(True)\n\n # Serialize data once so that it's not called in ever test\n cls.serialized_data = AuthorListSerializer(Author.objects.all(), many=True).data", "def define_testdata():\n wata_dict = {\n # info taken from main_hdr dict\n 'filename': ['jw09999001001_02101_00001_nrs1_uncal.fits'],\n 'date_obs': ['2022-06-22'],\n 'visit_id': ['V09999001001P0000000002101'],\n 'tafilter': ['F110W'],\n 'detector': ['NRS1'],\n 'readout': ['NRSRAPID'],\n 'subarray': ['FULL'],\n # info taken from ta_hdr dict\n 'ta_status': ['SUCCESSFUL'],\n 'status_reason': ['-999'],\n 'star_name': ['-999'],\n 'star_ra': [-999.0],\n 'star_dec': [-999.0],\n 'star_mag': [-999.0],\n 'star_catalog': [-999],\n 'planned_v2': [-999.0],\n 'planned_v3': [-999.0],\n 'stamp_start_col': [-999],\n 'stamp_start_row': [-999],\n 'star_detector': ['-999'],\n 'max_val_box': [-999.0],\n 'max_val_box_col': [-999.0],\n 'max_val_box_row': [-999.0],\n 'iterations': [-999],\n 'corr_col': [-999.0],\n 'corr_row': [-999.0],\n 'stamp_final_col': [-999.0],\n 'stamp_final_row': [-999.0],\n 'detector_final_col': [-999.0],\n 'detector_final_row': [-999.0],\n 'final_sci_x': [-999.0],\n 'final_sci_y': [-999.0],\n 'measured_v2': [-999.0],\n 'measured_v3': [-999.0],\n 'ref_v2': [-999.0],\n 'ref_v3': [-999.0],\n 'v2_offset': [-999.0],\n 'v3_offset': [-999.0],\n 'sam_x': [-999.0],\n 'sam_y': [-999.0],\n }\n # create the additional arrays\n bool_status, status_colors = [], []\n for tas, do_str in zip(wata_dict['ta_status'], wata_dict['date_obs']):\n if 'unsuccessful' not in tas.lower():\n bool_status.append(1)\n status_colors.append('blue')\n else:\n bool_status.append(0)\n status_colors.append('red')\n\n # add these to the bokeh data structure\n wata_dict['ta_status_bool'] = bool_status\n 
wata_dict['status_colors'] = status_colors\n\n # create the dataframe\n wata_data = pd.DataFrame(wata_dict)\n return wata_data", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def test_get(self):\n # Start sampling\n self.driver.start_sampling()\n\n self.clear_async_data()\n self.create_sample_data_set_dir(\n \"node59p1_step1.dat\",\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n self.assert_data((DostadParserTelemeteredDataParticle, DostadParserTelemeteredMetadataDataParticle),\n 'test_data_1.txt.result.yml', count=2, timeout=10)\n\n # there is only one file we read from, this example 'appends' data to\n # the end of the node59p1.dat file, and the data from the new append\n # is returned (not including the original data from _step1)\n self.clear_async_data()\n self.create_sample_data_set_dir(\n \"node59p1_step2.dat\",\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n self.assert_data(DostadParserTelemeteredDataParticle, 'test_data_2.txt.result.yml',\n count=1)\n\n # now 'appends' the rest of the data and just check if we get the right number\n self.clear_async_data()\n self.create_sample_data_set_dir(\n \"node59p1_step4.dat\",\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n self.assert_data(DostadParserTelemeteredDataParticle, count=4)", "def _create_data():\n tf.logging.info(\"Create records..\")\n train, val, test = util.load_data(data_dir, FLAGS[\"is_aug\"])\n tf.logging.info(\"Dataset size: Train-{} Test-{} Val-{}\".format(len(train), len(test), len(val)))\n return train, val, test", "def setUpTestData(cls):\n User.objects.create_user('Claire', 'claire@email.com', '12345678')\n User.objects.create_user('Georgie', 'georgie@email.com', '12345678')\n User.objects.create_user('Tristan', 'tristan@email.com', '12345678')\n\n Expense.objects.create(\n date=date.today(),\n description=\"Test balance 1\",\n category=\"Food\",\n amount=20,\n converted_amount=20,\n currency=\"GBP\",\n who_for=\"Everyone\",\n who_paid=\"Georgie\"\n )\n Expense.objects.create(\n date=date.today(),\n description=\"Test balance 2\",\n category=\"Food\",\n amount=10,\n converted_amount=10,\n currency=\"GBP\",\n who_for=\"Everyone\",\n who_paid=\"Claire\"\n )\n Expense.objects.create(\n date=date.today(),\n description=\"Test balance 3\",\n category=\"Food\",\n amount=30,\n converted_amount=30,\n currency=\"GBP\",\n who_for=\"Everyone\",\n who_paid=\"Tristan\"\n )", "def test_set_data_attributes(self):\n\n self.mediator.get_results()", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def setUp(self):\n\n self.data_list = [\n \"hello\", \"world\", \"funilrys\", \"funceble\", \"PyFunceble\", \"pyfunceble\"\n ]\n self.data = \"Hello, this is Fun Ilrys. 
I just wanted to know how things goes around the tests.\" # pylint: disable=line-too-long", "def fixtures():", "def test_get_task_output(self):\n pass", "def get_test_examples(self, data_dir):\n \n raise NotImplementedError()", "def setUpTestData(cls):\n cls.user = UserFactory()\n cls.auth = AuthFactory()\n\n cls.device = TOTPDevice.objects.create(user=cls.user)\n cls.relate = TOTPDevice.challenge.objects.create(\n device=cls.device, token=cls.auth\n )\n\n cls.algorithm = TOTPAlgorithm()", "def test_run(self):\n sut = ExperimentEmail()\n train = os.path.join(os.path.dirname(__file__), \"data\", \"sample.csv\")\n val = os.path.join(os.path.dirname(__file__), \"data\", \"sample.csv\")\n outdir = tempfile.mkdtemp()\n\n # Act\n sut.run(train, val, outdir, batch_size=32, epochs=2)", "def test_parse_import_ticket_data_2(self):\n self.ticket_dict1[\"host_genus\"] = \"Mycobacterium\"\n self.ticket_dict1[\"cluster\"] = \"A\"\n self.ticket_dict1[\"subcluster\"] = \"A2\"\n self.ticket_dict1[\"accession\"] = \"ABC123\"\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, \"add\")\n with self.subTest():\n self.assertEqual(tkt.phage_id, \"Trixie\")\n with self.subTest():\n self.assertEqual(tkt.description_field, \"product\")\n with self.subTest():\n self.assertEqual(tkt.eval_mode, \"final\")\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set())\n with self.subTest():\n self.assertEqual(tkt.data_retain, set())\n with self.subTest():\n self.assertEqual(tkt.data_parse, set())\n with self.subTest():\n self.assertEqual(tkt.data_add, set([\"subcluster\", \"host_genus\",\n \"cluster\", \"accession\"]))", "def generate_train_test(self):\n x, y = self.read_data()\n x_train, y_train, x_test, y_test = self.sample_data(x, y)\n self.train = (x_train, y_train)\n self.test = (x_test, y_test)", "def test_list_runs(self):\n pass", "def test_all_features_with_data(self):\n feature1 = Feature('looktest1')\n feature1.set_percentage(5)\n\n feature2 = Feature('looktest2')\n feature2.activate()\n feature2.add_to_whitelist(3)\n\n feature3 = Feature('looktest3')\n feature3.activate()\n feature3.add_to_blacklist(4)\n feature3.add_to_blacklist(5)\n\n feature4 = Feature('looktest4')\n feature4.activate()\n feature4.add_to_whitelist(3)\n feature4.add_to_whitelist(5)\n feature4.add_to_blacklist(4)\n\n all_features = Feature.all_features(include_data=True)\n self.assertEqual(len(all_features), 4)\n\n for key in ['looktest1', 'looktest2', 'looktest3', 'looktest4']:\n self.assertTrue(key in all_features)\n if not key == 'looktest1':\n self.assertEqual(all_features[key]['percentage'], 100)\n\n self.assertEqual(all_features['looktest1']['percentage'], 5)\n self.assertFalse('whitelist' in all_features['looktest1'])\n self.assertFalse('blacklist' in all_features['looktest1'])\n\n self.assertTrue('whitelist' in all_features['looktest2'])\n self.assertEqual(all_features['looktest2']['whitelist'], [3])\n self.assertFalse('blacklist' in all_features['looktest2'])\n\n self.assertFalse('whitelist' in all_features['looktest3'])\n self.assertTrue('blacklist' in all_features['looktest3'])\n self.assertEqual(all_features['looktest3']['blacklist'], [4, 5])\n\n self.assertTrue('whitelist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['whitelist'], [3, 5])\n self.assertTrue('blacklist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['blacklist'], [4])", 
"def test_got_data(self):\n # Create and initialize the instrument driver with a mock port agent\n driver = InstrumentDriver(self._got_data_event_callback)\n self.assert_initialize_driver(driver)\n\n self.assert_raw_particle_published(driver, True)\n\n # validating data particles are published\n self.assert_particle_published(driver, self.RASFL_STATUS_DATA, self.assert_data_particle_status, True)\n self.assert_particle_published(driver, self.RASFL_SAMPLE_DATA1, self.assert_data_particle_sample, True)\n \n # validate that a duplicate sample is not published\n self.assert_particle_not_published(driver, self.RASFL_SAMPLE_DATA1, self.assert_data_particle_sample, True)\n \n # validate that a new sample is published\n self.assert_particle_published(driver, self.RASFL_SAMPLE_DATA2, self.assert_data_particle_sample, False)", "def test_parse_import_ticket_data_1(self):\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, \"add\")\n with self.subTest():\n self.assertEqual(tkt.phage_id, \"Trixie\")\n with self.subTest():\n self.assertEqual(tkt.description_field, \"product\")\n with self.subTest():\n self.assertEqual(tkt.eval_mode, \"final\")\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set([\"host_genus\"]))\n with self.subTest():\n self.assertEqual(tkt.data_retain, set([\"cluster\"]))\n with self.subTest():\n self.assertEqual(tkt.data_parse, set([\"accession\"]))\n with self.subTest():\n self.assertEqual(tkt.data_add, set([\"subcluster\"]))", "def test_image(self):\r\n self.testdata = open(TESTDATA_FILENAME).read()", "def test_basic(self):\n telem = self.create_logs(self.user1,\n num=10,\n start=self.year2000,\n altitude=100,\n heading=90)\n\n response = self.client.get(telemetry_url)\n self.assertEqual(200, response.status_code)\n\n data = json.loads(response.content)\n\n self.assertEqual(len(telem), len(data))\n\n lats = []\n lons = []\n times = []\n\n for entry in data:\n self.assertEqual(self.user1.pk, entry['user'])\n self.assertEqual(100, entry['altitude_msl'])\n self.assertEqual(90, entry['heading'])\n\n # All entries are unique\n self.assertNotIn(entry['timestamp'], times)\n times.append(entry['timestamp'])\n\n self.assertNotIn(entry['latitude'], lats)\n lats.append(entry['latitude'])\n\n self.assertNotIn(entry['longitude'], lons)\n lons.append(entry['longitude'])", "def setUp(self):\n self.dataset = self.dataset_cls()", "def load_data(self):\n try:\n data = etree.parse(self.resultfilename).getroot()\n except OSError:\n data = []\n\n testresults = []\n for testcase in data:\n category = Category.OK\n status = 'ok'\n module = testcase.get('classname')\n name = testcase.get('name')\n message = ''\n time = float(testcase.get('time'))\n extras = []\n\n for child in testcase:\n if child.tag in ('error', 'failure', 'skipped'):\n if child.tag == 'skipped':\n category = Category.SKIP\n else:\n category = Category.FAIL\n status = child.tag\n type_ = child.get('type')\n message = child.get('message', default='')\n if type_ and message:\n message = '{0}: {1}'.format(type_, message)\n elif type_:\n message = type_\n if child.text:\n extras.append(child.text)\n elif child.tag in ('system-out', 'system-err'):\n if child.tag == 'system-out':\n heading = _('Captured stdout')\n else:\n heading = _('Captured stderr')\n contents = child.text.rstrip('\\n')\n extras.append('----- {} -----\\n{}'.format(heading,\n contents))\n\n extra_text = '\\n\\n'.join(extras)\n 
testresults.append(\n TestResult(category, status, name, module, message, time,\n extra_text))\n\n return testresults", "def fixture_evaluation_details_example():\n test_example = [\n EvaluationJob(\n name=EVALUATION_JOB_NAME,\n evaluation_observation=EVALUATION_OBSERVATION,\n datasets=EVALUATION_DATASETS,\n metadata=EVALUATION_METADATA,\n metric_groups=EVALUATION_METRIC_GROUPS,\n )\n ]\n return test_example", "def newTestData(self):\n self.newTab( extension = TestData.TYPE, repoDest=UCI.REPO_UNDEFINED )", "def test_create_data(self):\n process = Process.objects.filter(slug=\"test-min\").latest()\n data = Data.objects.create(\n name=\"Test data\",\n contributor=self.contributor,\n process=process,\n )\n\n data.refresh_from_db()\n self.assertEqual(data.status, Data.STATUS_DONE)", "def test_get_stats(self):\n pass", "def run_tests():\n with open(FILENAME) as file:\n\n # Loads the test hyper-parameters as dictionaries.\n tests = yaml.safe_load(file)\n \n # create a dataframe to keep the results\n test_dict = tests['Tests']\n results = pd.DataFrame(test_dict)\n results[\"Episode\"] = \"\"\n results['Max average score'] = \"\"\n\n for i, test in enumerate(tests['Tests']):\n\n env = gym.make(test['env'])\n env.reset()\n\n actor_critic = ActorCritic(env, test['episodes'], test['max_score'], \n test['hidden_size'], test['gamma'], test['save'])\n\n ## run training \n best_score, episode, rew_hist = actor_critic.train()\n\n results.loc[i,'Episode'] = episode\n results.loc[i,'Max average score'] = best_score\n\n plot_graphs(test, rew_hist)\n\n # save results to csv file\n filename = 'results/' + 'test_table.csv'\n results.to_csv(filename)\n\n return results", "def test_case_data(self, index):\n return self._db_logger.get_test_case_data(index=index)" ]
[ "0.77121603", "0.74460846", "0.73213947", "0.7164332", "0.7164332", "0.7164332", "0.7164332", "0.714354", "0.7103978", "0.69678515", "0.68217576", "0.68121225", "0.6750978", "0.67116654", "0.67001045", "0.66699296", "0.6623456", "0.66211283", "0.6617969", "0.6593253", "0.6550207", "0.65255433", "0.6525252", "0.65185773", "0.6505959", "0.65020216", "0.6501809", "0.6496141", "0.6485021", "0.6480122", "0.6461655", "0.6449671", "0.6443126", "0.6439868", "0.64337456", "0.6429708", "0.64045286", "0.63937414", "0.63931", "0.636556", "0.6363143", "0.63509923", "0.6349777", "0.6342677", "0.6330585", "0.63274515", "0.6323819", "0.630822", "0.63030195", "0.62908524", "0.62887305", "0.62887305", "0.62887305", "0.62887305", "0.62887305", "0.62862134", "0.6273587", "0.62582386", "0.62521493", "0.62398285", "0.623861", "0.62286127", "0.62229824", "0.6215012", "0.621368", "0.6208048", "0.6200624", "0.6197626", "0.6197441", "0.61937124", "0.61937124", "0.61886626", "0.61853796", "0.618309", "0.6174523", "0.6166505", "0.6166505", "0.6166505", "0.61657023", "0.61627096", "0.61603796", "0.6145121", "0.6142508", "0.6140575", "0.6137853", "0.6134775", "0.6132814", "0.613217", "0.6126939", "0.6124216", "0.6111275", "0.61112607", "0.610936", "0.610842", "0.60992897", "0.60987645", "0.60918003", "0.60899746", "0.6089622", "0.6085908" ]
0.75773656
1
Collater to make batches
def collate_fn(self, *args): return TupleMiniBatch(default_collate(*args))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _collater(batch):\n return batch[0]", "def collater(self, samples):\r\n raise NotImplementedError", "def produce_query_batches(self):\n pass", "def trivial_batch_collator(batch):\n return batch", "def trivial_batch_collator(batch):\n return batch", "def trivial_batch_collator(batch):\n return batch", "def collate_fn(batch):\r\n transposed = zip(*batch)\r\n lbd = lambda batch:torch.cat([torch.from_numpy(b).long() for b in batch])\r\n return [lbd(samples) for samples in transposed]", "def collate_fn(batch):\n metadata = []\n for el in batch:\n metadata.append(el[\"metadata\"])\n del el[\"metadata\"]\n\n batch = default_collate(batch)\n\n batch[\"metadata\"] = metadata\n\n return batch", "def customize_collate(batch):\n\n elem = batch[0]\n elem_type = type(elem)\n if isinstance(elem, torch.Tensor):\n # this is the main part to handle varied length data in a batch\n # batch = [data_tensor_1, data_tensor_2, data_tensor_3 ... ]\n # \n batch_new = pad_sequence(batch)\n \n out = None\n if torch.utils.data.get_worker_info() is not None:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n\n # allocate the memory based on maximum numel\n numel = max([x.numel() for x in batch_new]) * len(batch_new)\n storage = elem.storage()._new_shared(numel)\n out = elem.new(storage)\n return torch.stack(batch_new, 0, out=out)\n\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':\n # array of string classes and object\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(customize_collate_err_msg.format(elem.dtype))\n # this will go to loop in the last case\n return customize_collate([torch.as_tensor(b) for b in batch])\n elif elem.shape == (): # scalars\n return torch.as_tensor(batch)\n \n elif isinstance(elem, float):\n return torch.tensor(batch, dtype=torch.float64)\n elif isinstance(elem, int_classes):\n return torch.tensor(batch)\n elif isinstance(elem, string_classes):\n return batch\n elif isinstance(elem, container_abcs.Mapping):\n return {key: customize_collate([d[key] for d in batch]) for key in elem}\n elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple\n return elem_type(*(customize_collate(samples) \\\n for samples in zip(*batch)))\n elif isinstance(elem, container_abcs.Sequence):\n # check to make sure that the elements in batch have consistent size\n it = iter(batch)\n elem_size = len(next(it))\n if not all(len(elem) == elem_size for elem in it):\n raise RuntimeError('each element in batch should be of equal size')\n \n # zip([[A, B, C], [a, b, c]]) -> [[A, a], [B, b], [C, c]]\n transposed = zip(*batch)\n return [customize_collate(samples) for samples in transposed]\n\n raise TypeError(customize_collate_err_msg.format(elem_type))", "def customize_collate_from_batch(batch):\n\n elem = batch[0]\n elem_type = type(elem)\n if isinstance(elem, torch.Tensor):\n batch_new = pad_sequence(batch) \n out = None\n if torch.utils.data.get_worker_info() is not None:\n numel = max([x.numel() for x in batch_new]) * len(batch_new)\n storage = elem.storage()._new_shared(numel)\n out = elem.new(storage)\n # here is the difference\n return torch.cat(batch_new, 0, out=out)\n\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':\n if 
np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(customize_collate_err_msg.format(elem.dtype))\n return customize_collate_from_batch(\n [torch.as_tensor(b) for b in batch])\n elif elem.shape == (): # scalars\n return torch.as_tensor(batch)\n elif isinstance(elem, float):\n return torch.tensor(batch, dtype=torch.float64)\n elif isinstance(elem, int_classes):\n return torch.tensor(batch)\n elif isinstance(elem, string_classes):\n return batch\n elif isinstance(elem, tuple):\n # concatenate two tuples\n tmp = elem\n for tmp_elem in batch[1:]:\n tmp += tmp_elem \n return tmp\n elif isinstance(elem, container_abcs.Sequence):\n it = iter(batch)\n elem_size = len(next(it))\n if not all(len(elem) == elem_size for elem in it):\n raise RuntimeError('each element in batch should be of equal size')\n transposed = zip(*batch)\n return [customize_collate_from_batch(samples) for samples in transposed]\n\n raise TypeError(customize_collate_err_msg.format(elem_type))", "def prepare_batch(data,BATCH_SIZE, filename):\n ### select the last BATCH_SIZE rows from batch dataset\n batch = data.iloc[-BATCH_SIZE:].values.tolist()\n batch_data = []\n \n ### remove white spaces in the list because predtion server expects no white spaces between elements\n for i in batch:\n str_row = str(i)\n str_row = str_row.replace(' ','')\n batch_data.append(str_row)\n \n ### write values in a file called filename\n with open(filename, 'w') as f:\n f.write(','.join(str(i) for i in batch_data))", "def collate_fn(batch):\n\n flattened_batch = []\n for data in batch:\n num_examples = len(data['image'])\n for i in range(num_examples):\n flattened_batch.append({\n k: v[i] for k, v in data.items()\n })\n\n return default_collate(flattened_batch)", "def collate_batch(self) -> Dict[str, Any]:\n pass", "def _collate_fn(batch):\r\n batch = list(zip(*batch))\r\n batch[0] = torch.stack(batch[0])\r\n batch[1] = list(batch[1])\r\n batch[2] = torch.stack(batch[2])\r\n return tuple(batch)", "def transform_batch(self, batch: Dict[str, Any], results: Dict[str, Any] = None, cls_is_bos=False,\n sep_is_eos=False) -> Dict[str, Any]:\n return batch", "def collate_fn(self, batch):\r\n batch = list(map(torch.stack, zip(*batch)))\r\n max_seq_len = torch.max(torch.sum(batch[1], 1)).item()\r\n for i in range(len(batch) - 1):\r\n if batch[i].size()[1] > max_seq_len:\r\n batch[i] = batch[i][:, :max_seq_len]\r\n if self.truncate_label:\r\n batch[-1] = batch[-1][:, :max_seq_len]\r\n return batch", "def batchify(batch):\n\n PAD_ID = batch[0]['<PAD>']\n inputs_list = [ex['input'] for ex in batch]\n max_length_list = []\n for docs in inputs_list:\n max_length = max([len(doc[1]) for doc in docs])\n max_length_list.append(max_length)\n inputs = []\n for index,docs in enumerate(inputs_list):\n bat_size = len(docs)\n tp_vecs = torch.zeros((bat_size,max_length_list[index]),dtype=torch.long)\n tp_vecs += PAD_ID\n for k,doc in enumerate(docs):\n for j,word in enumerate(doc[1]):\n tp_vecs[k,j] = word\n tp_list = [doc[0] for doc in docs]\n tp_list = torch.tensor(tp_list,dtype=torch.long)\n inputs.append([tp_list,tp_vecs])\n week_index_list = torch.tensor([ex['target'][0] for ex in batch],dtype=torch.long)\n word_index_list = torch.tensor([ex['target'][1] for ex in batch],dtype=torch.long)\n targets = (week_index_list,word_index_list)\n return inputs,targets", "def batchify(fn, chunk):\n if chunk is None:\n return fn\n\n def ret(inputs, styles, alpha, feature):\n results = []\n for i in range(0, inputs.shape[0], chunk):\n input_chunk = inputs[i:i 
+ chunk]\n style_chunk = styles[i:i + chunk]\n alpha_chunk = alpha[i:i + chunk] if alpha is not None else None\n feature_chunk = feature[i:i + chunk] if feature is not None else None\n results.append(fn(input_chunk, style_chunk, alpha_chunk, feature_chunk))\n return torch.cat(results, 0)\n return ret", "def custom_collate_fn(batch):\n images, bboxes, context_indices, labels = zip(*batch)\n # images = (img_1, ..., img_N) each element of size [3, img_H, img_W]\n # bboxes = (bboxes_1, ..., bboxes_N) each element of size [n_bboxes_in_image, 4]\n # context_indices = (ci_1, ..., ci_N) each element of size [n_bboxes_in_image, 2*context_size]\n # labels = (labels_1, ..., labels_N) each element of size [n_bboxes_in_image]\n \n images = torch.stack(images, 0)\n \n bboxes_with_batch_index = []\n observed_bboxes = 0\n for i, bbox in enumerate(bboxes):\n batch_indices = torch.Tensor([i]*bbox.shape[0]).view(-1,1)\n bboxes_with_batch_index.append(torch.cat((batch_indices, bbox), dim=1))\n context_indices[i][context_indices[i] != -1] += observed_bboxes\n observed_bboxes += bbox.shape[0]\n bboxes_with_batch_index = torch.cat(bboxes_with_batch_index)\n context_indices = torch.cat(context_indices)\n \n labels = torch.cat(labels)\n \n return images, bboxes_with_batch_index, context_indices, labels", "def dynamic_padding_collate_fn(batch_list):\n batch_uncollated = [[] for i in range(3)]\n\n for features in batch_list:\n length = features[1].sum().item()\n for i, feature in enumerate(features):\n batch_uncollated[i].append(feature[:length])\n\n batch_collated = []\n for batch in batch_uncollated:\n batch_collated.append(pad_sequence(batch, batch_first=True))\n\n return batch_collated", "def _collate_fn(batch):\n # imgs = [b[0] for b in batch]\n # labels = [b[1] for b in batch]\n # imgs = torch.stack(imgs, dim=0)\n # return [imgs, labels]\n imgs = [b[0] for b in batch]\n labels = [b[1] for b in batch]\n imgs = torch.cat(imgs, dim=0)\n labels = [l for sublist in labels for l in sublist]\n return [imgs, labels]", "def _batcher(self, rows):\n row_count = 0\n batch = []\n batch_count = 1\n\n total_rows_modified = 0\n throttle_count = 0\n\n i = 0\n for row in rows:\n if row_count > self.batch_size - 1:\n logger.debug(f\"row_count={row_count} batch_size={self.batch_size} and batch={len(batch)}\")\n # Yield the previous batch\n yield batch\n\n # Start the new batch\n batch = []\n batch.append(row)\n row_count = 1\n\n batch_count += 1\n # break # toggle to load one batch only\n else:\n row_count += 1\n batch.append(row)\n\n # Put in a sleep timer to throttle how hard we hit the database\n if self.throttle_time and self.throttle_size and (throttle_count > self.throttle_size - 1):\n logger.info(f\"Sleeping for {self.throttle_time} seconds... 
row: {i}\")\n time.sleep(int(self.throttle_time))\n throttle_count = 0\n elif self.throttle_time and self.throttle_size:\n throttle_count += 1\n i += 1\n\n yield batch", "def individual_collate(batch):\n\n data = batch\n\n collected_data = defaultdict(list)\n\n for i in range(len(list(data))):\n for k in data[i].keys():\n collected_data[k].append(data[i][k])\n\n for k in collected_data.keys():\n collected_data[k] = torch.stack(collected_data[k])\n\n return collected_data", "def collater(self, samples):\n batch = self.base_dataset.collater(samples)\n # In case of an empty batch, return an empty dict\n if len(batch) == 0:\n return {}\n auxiliary_targets_map = {}\n for i, s in enumerate(samples):\n auxiliary_targets_map[s['id']] = i\n sort_order = []\n for s_id in batch['id'].tolist():\n sort_order.append(auxiliary_targets_map[s_id])\n sort_order = torch.tensor(sort_order)\n auxiliary_target = torch.stack([s[\"auxiliary_target\"] for s in samples])\n batch['auxiliary_target'] = auxiliary_target.index_select(0, sort_order)\n return batch", "def mycollate(batch):\n elem_type = type(batch[0])\n if isinstance(batch[0], torch.Tensor):\n out = None\n if _use_shared_memory:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum([x.numel() for x in batch])\n storage = batch[0].storage()._new_shared(numel)\n out = batch[0].new(storage)\n return torch.stack(batch, 0, out=out)\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n elem = batch[0]\n if elem_type.__name__ == 'ndarray':\n # array of string classes and object\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(error_msg_fmt.format(elem.dtype))\n\n return default_collate([torch.from_numpy(b) for b in batch])\n if elem.shape == (): # scalars\n py_type = float if elem.dtype.name.startswith('float') else int\n return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n elif isinstance(batch[0], float):\n return torch.tensor(batch, dtype=torch.float64)\n elif isinstance(batch[0], int_classes):\n return torch.tensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], container_abcs.Mapping):\n if 'dataset' not in batch[0] or batch[0]['dataset'].neib_samp not in ('sampling', 'best', 'relation'):\n return {key: default_collate([d[key] for d in batch]) for key in batch[0] if key not in ['weight','impt','dataset']}\n relations = batch[0]['dataset'].tr_grp\n if batch[0]['dataset'].neib_samp == 'relation':\n nodes2 = sum([d['impt'] for d in batch],[])\n else:\n w= sum([d['weight'] for d in batch], Counter())\n [w.pop(d['index'], None) for d in batch] \n if batch[0]['dataset'].neib_samp == 'sampling':\n p = FlexCounter(w)/sum(w.values())\n nodes2 = np.random.choice(list(p.keys()), batch[0]['dataset'].k, replace=False, p=list(p.values()))\n elif batch[0]['dataset'].neib_samp == 'best':\n nodes2 = nlargest(batch[0]['dataset'].k, w, key = w.get) \n \n neib_batch = [batch[0]['dataset']._getimage(x,True,1) for x in nodes2]\n [(d.pop('weight', None), d.pop('dataset', None)) for d in batch]\n batch = neib_batch + batch\n coll = default_collate(batch)\n adj_mats = {r: np.zeros((len(batch), len(batch))) for r in relations}\n for r in relations:\n for i, b1 in enumerate(coll[r]):\n for j, b2 in enumerate(coll[r]):\n if i!=j:\n adj_mats[r][i,j] = 1 if b1==b2 else 0\n adj_mats[r] = adj_norm(adj_mats[r]) \n coll['adj'] = adj_mats\n coll['k'] = 
len(nodes2)\n return coll\n \n elif isinstance(batch[0], tuple) and hasattr(batch[0], '_fields'): # namedtuple\n return type(batch[0])(*(default_collate(samples) for samples in zip(*batch)))\n elif isinstance(batch[0], container_abcs.Sequence):\n transposed = zip(*batch)\n return [default_collate(samples) for samples in transposed]\n\n raise TypeError((error_msg_fmt.format(type(batch[0]))))", "def variable_tensor_size_collator(batch):\n assert isinstance(batch, Iterable)\n transpose = list(zip(*batch))\n for idx, field in enumerate(transpose):\n try:\n transpose[idx] = torch.stack(field)\n except Exception:\n continue\n return transpose", "def prepare_batches(self, pairs, batch_size):\n\t\treturn MATHBatch.create_from_items(pairs, batch_size)", "def _collate(cls, inbatch, num_devices=None):\n item0 = inbatch[0]\n bsize = len(inbatch)\n if num_devices is None:\n num_devices = 1\n\n samples_per_device = int(np.ceil(bsize / num_devices))\n\n # assert bsize % samples_per_device == 0\n stacked = []\n if item0.cpu_only:\n # chunking logic\n stacked = []\n for i in range(0, bsize, samples_per_device):\n stacked.append(\n [sample.data for sample in inbatch[i:i + samples_per_device]])\n\n elif item0.stack:\n for i in range(0, bsize, samples_per_device):\n item = inbatch[i]\n pad_dims_ = item.pad_dims\n assert isinstance(item.data, torch.Tensor)\n\n if pad_dims_ is not None:\n # Note: can probably reimplement this using padded collate\n # logic\n ndim = item.dim()\n assert ndim > pad_dims_\n max_shape = [0 for _ in range(pad_dims_)]\n for dim in range(1, pad_dims_ + 1):\n max_shape[dim - 1] = item.shape[-dim]\n for sample in inbatch[i:i + samples_per_device]:\n for dim in range(0, ndim - pad_dims_):\n assert item.shape[dim] == sample.shape[dim]\n for dim in range(1, pad_dims_ + 1):\n max_shape[dim - 1] = max(max_shape[dim - 1], sample.shape[-dim])\n padded_samples = []\n for sample in inbatch[i:i + samples_per_device]:\n pad = [0 for _ in range(pad_dims_ * 2)]\n for dim in range(1, pad_dims_ + 1):\n pad[2 * dim - 1] = max_shape[dim - 1] - sample.shape[-dim]\n padded_samples.append(\n F.pad(sample.data, pad, value=sample.padding_value))\n stacked.append(default_collate(padded_samples))\n\n elif pad_dims_ is None:\n stacked.append(\n default_collate([\n sample.data\n for sample in inbatch[i:i + samples_per_device]\n ]))\n else:\n raise ValueError(\n 'pad_dims should be either None or integers (1-3)')\n\n else:\n for i in range(0, bsize, samples_per_device):\n stacked.append(\n [sample.data for sample in inbatch[i:i + samples_per_device]])\n result = BatchContainer(stacked, **item0.meta)\n return result", "def prepare_batches(self, data):\n batches = []\n start, end = 0, 100\n if len(data) > 100:\n while True:\n data_batch = data[start:end]\n if not data_batch:\n break\n temp = end + 100\n start, end = end, temp\n if data_batch:\n batches.append(data_batch)\n else:\n batches.append(data)\n return batches", "def collate_fn(batch):\r\n names, images, annos = zip(*batch)\r\n images = default_collate(images)\r\n return names, images, annos", "def collate_fn(batch):\r\n names, images, annos = zip(*batch)\r\n images = default_collate(images)\r\n return names, images, annos", "def collate_fn(self, batch):\n images = list()\n boxes = list()\n labels = list()\n difficulties = list()\n\n for b in batch:\n images.append(b[0])\n boxes.append(b[1])\n labels.append(b[2])\n difficulties.append(b[3])\n\n images = torch.stack(images, dim=0)\n\n return images, boxes, labels, difficulties # tensor (N, 3, 300, 300), 3 
lists of N tensors each", "def basic_collate(batch):\n\n minibatch, targets = zip(*[(a, b) for (a,b) in batch])\n minibatch = stack(minibatch, dim=0)\n return minibatch, targets", "def collate_fn(batch):\n # eliminate invalid data (where boxes is [] tensor)\n old_batch_len = len(batch)\n batch = [x for x in batch if x[1]['boxes'].shape[0] != 0]\n # try refill empty sample by other sample in current batch\n #print('batch len = ', old_batch_len)\n #print('new batch len = ', len(batch))\n new_batch_len = len(batch)\n for i in range(new_batch_len, old_batch_len):\n batch.append(copy.deepcopy(batch[i%new_batch_len]))\n #print('batch = ', batch)\n #print('filled batch len = ', len(batch))\n batch = list(zip(*batch)) # batch[0]: data tensor, batch[1]: targets dict\n\n batch[0] = nested_tensor_from_tensor_list(batch[0])\n return tuple(batch)", "def conv_batchify(self, batch):\n batch_roles = []\n batch_context_tokens = []\n batch_response = []\n\n for conv_dict in batch:\n batch_roles.append(0 if conv_dict['role'] == 'Seeker' else 1)\n context_tokens = [utter + [self.conv_bos_id] for utter in conv_dict['context_tokens']]\n context_tokens[-1] = context_tokens[-1][:-1]\n batch_context_tokens.append(\n truncate(merge_utt(context_tokens), max_length=self.context_truncate, truncate_tail=False),\n )\n batch_response.append(\n add_start_end_token_idx(\n truncate(conv_dict['response'], max_length=self.response_truncate - 2),\n start_token_idx=self.start_token_idx,\n end_token_idx=self.end_token_idx\n )\n )\n\n batch_context_tokens = padded_tensor(items=batch_context_tokens,\n pad_idx=self.pad_token_idx,\n max_len=self.context_truncate,\n pad_tail=False)\n batch_response = padded_tensor(batch_response,\n pad_idx=self.pad_token_idx,\n max_len=self.response_truncate,\n pad_tail=True)\n batch_input_ids = torch.cat((batch_context_tokens, batch_response), dim=1)\n batch_roles = torch.tensor(batch_roles)\n\n return (batch_roles,\n batch_input_ids,\n batch_context_tokens,\n batch_response)", "def custom_collate(batch):\n error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n elem_type = type(batch[0])\n if isinstance(batch[0], torch.Tensor):\n out = None\n matched = True\n for dim in range(batch[0].dim()):\n lst = list(map(lambda x: x.size(dim), batch))\n matched = not lst or lst.count(lst[0]) == len(lst)\n if not matched:\n break\n if matched:\n return torch.stack(batch, 0, out=out)\n else:\n return pad_sequence(batch, batch_first=True)\n # indices, items = zip(*sorted(enumerate(batch), key=lambda x: x[1].size(0), reverse=True))\n # lengths = [batch[i].size(0) for i in indices]\n # logger.info(lengths)\n # return pad_sequence([batch[i] for i in indices], batch_first=True), lengths\n elif isinstance(batch[0], np.ndarray):\n matched = True\n for dim in range(batch[0].ndim):\n lst = list(map(lambda x: x.shape[dim], batch))\n matched = not lst or lst.count(lst[0]) == len(lst)\n if not matched:\n break\n if matched:\n return np.stack(batch, 0)\n else:\n raise ValueError('dimensions are not matched {}'.format(batch[0].shape))\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n raise ValueError('cannot handle numpy data')\n elif isinstance(batch[0], int_classes):\n return torch.LongTensor(batch)\n elif isinstance(batch[0], float):\n return torch.DoubleTensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], collections.abc.Mapping):\n return {key: custom_collate([d[key] for d in batch]) 
for key in batch[0]}\n elif isinstance(batch[0], collections.abc.Sequence):\n transposed = zip(*batch)\n return [custom_collate(samples) for samples in transposed]\n raise TypeError((error_msg.format(type(batch[0]))))", "def collate_fn(batch, samples_per_gpu=1):\n if not isinstance(batch, Sequence):\n raise TypeError(f'{batch.dtype} is not supported.')\n\n if isinstance(batch[0], list):\n batch = [item for _ in batch for item in _]\n\n if isinstance(batch[0], DataContainer):\n assert len(batch) % samples_per_gpu == 0\n stacked = []\n if batch[0].cpu_only:\n for i in range(0, len(batch), samples_per_gpu):\n stacked.append(\n [sample.data for sample in batch[i:i + samples_per_gpu]])\n return DataContainer(\n stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)\n elif batch[0].stack:\n for i in range(0, len(batch), samples_per_gpu):\n assert isinstance(batch[i].data, torch.Tensor)\n\n if batch[i].pad_dims is not None:\n ndim = batch[i].dim()\n assert ndim > batch[i].pad_dims\n max_shape = [0 for _ in range(batch[i].pad_dims)]\n for dim in range(1, batch[i].pad_dims + 1):\n max_shape[dim - 1] = batch[i].size(-dim)\n for sample in batch[i:i + samples_per_gpu]:\n for dim in range(0, ndim - batch[i].pad_dims):\n assert batch[i].size(dim) == sample.size(dim)\n for dim in range(1, batch[i].pad_dims + 1):\n max_shape[dim - 1] = max(max_shape[dim - 1],\n sample.size(-dim))\n padded_samples = []\n for sample in batch[i:i + samples_per_gpu]:\n pad = [0 for _ in range(batch[i].pad_dims * 2)]\n for dim in range(1, batch[i].pad_dims + 1):\n pad[2 * dim -\n 1] = max_shape[dim - 1] - sample.size(-dim)\n padded_samples.append(\n F.pad(\n sample.data, pad, value=sample.padding_value))\n stacked.append(collate(padded_samples))\n elif batch[i].pad_dims is None:\n stacked.append(\n collate([\n sample.data\n for sample in batch[i:i + samples_per_gpu]\n ]))\n else:\n raise ValueError(\n 'pad_dims should be either None or integers (1-3)')\n\n else:\n for i in range(0, len(batch), samples_per_gpu):\n stacked.append(\n [sample.data for sample in batch[i:i + samples_per_gpu]])\n return DataContainer(stacked, batch[0].stack, batch[0].padding_value)\n elif isinstance(batch[0], Sequence):\n transposed = zip(*batch)\n return [collate(samples, samples_per_gpu) for samples in transposed]\n\n elif isinstance(batch[0], Mapping):\n res = dict()\n for key in batch[0]:\n if isinstance(batch[0][key], torch.Tensor):\n res.update({key: collate([d[key] for d in batch], samples_per_gpu)})\n else:\n res.update({key: [d[key] for d in batch]})\n\n return res\n # return {\n # key: collate([d[key] for d in batch], samples_per_gpu)\n # for key in batch[0]\n # }\n else:\n return collate(batch)", "def batchify(TEXT, data, batch_size, device):\r\n data = TEXT.numericalize([data.examples[0].text])\r\n num_batches = data.size(0)//batch_size\r\n data = data.narrow(0, 0, num_batches * batch_size)\r\n data = data.view(batch_size, -1).t().contiguous()\r\n\r\n return data.to(device)", "def make_localized_batches(data, nbatches, batch_size, field_size, stride):\n batches = {}\n for i in xrange(0, data['images'].shape[1] - field_size + 1, stride):\n for j in xrange(0, data['images'].shape[2] - field_size + 1, stride):\n batches[(i, j)] = make_unsupervised_batches(data['images']\n [:, i:i + field_size, j:j + field_size, :],\n nbatches, batch_size)\n return batches", "def _batchify(data: nd.NDArray, batch_size):\n # Work out how cleanly we can divide the dataset into bsz parts.\n nbatch = len(data) // batch_size\n # Trim off any extra 
elements that wouldn't cleanly fit (remainders).\n data = data[0: nbatch * batch_size]\n # Evenly divide the data across the bsz batches.\n data = data.reshape(batch_size, -1).transpose()\n # if torch.cuda.is_available():\n # data = data.cuda()\n return data", "def variable_time_collate_fn2(batch, args, device = torch.device(\"cpu\"), data_type = \"train\", \n data_min = None, data_max = None):\n D = batch[0][2].shape[1]\n len_tt = [ex[1].size(0) for ex in batch]\n maxlen = np.max(len_tt)\n enc_combined_tt = torch.zeros([len(batch), maxlen]).to(device)\n enc_combined_vals = torch.zeros([len(batch), maxlen, D]).to(device)\n enc_combined_mask = torch.zeros([len(batch), maxlen, D]).to(device)\n for b, (record_id, tt, vals, mask, labels) in enumerate(batch):\n currlen = tt.size(0)\n enc_combined_tt[b, :currlen] = tt.to(device) \n enc_combined_vals[b, :currlen] = vals.to(device) \n enc_combined_mask[b, :currlen] = mask.to(device) \n \n combined_tt, inverse_indices = torch.unique(torch.cat([ex[1] for ex in batch]), sorted=True, return_inverse=True)\n combined_tt = combined_tt.to(device)\n\n offset = 0\n combined_vals = torch.zeros([len(batch), len(combined_tt), D]).to(device)\n combined_mask = torch.zeros([len(batch), len(combined_tt), D]).to(device)\n\n combined_labels = None\n N_labels = 1\n\n combined_labels = torch.zeros(len(batch), N_labels) + torch.tensor(float('nan'))\n combined_labels = combined_labels.to(device = device)\n\n for b, (record_id, tt, vals, mask, labels) in enumerate(batch):\n tt = tt.to(device)\n vals = vals.to(device)\n mask = mask.to(device)\n if labels is not None:\n labels = labels.to(device)\n\n indices = inverse_indices[offset:offset + len(tt)]\n offset += len(tt)\n\n combined_vals[b, indices] = vals\n combined_mask[b, indices] = mask\n\n if labels is not None:\n combined_labels[b] = labels\n\n combined_vals, _, _ = utils.normalize_masked_data(combined_vals, combined_mask, \n att_min = data_min, att_max = data_max)\n enc_combined_vals, _, _ = utils.normalize_masked_data(enc_combined_vals, enc_combined_mask, \n att_min = data_min, att_max = data_max)\n\n if torch.max(combined_tt) != 0.:\n combined_tt = combined_tt / torch.max(combined_tt)\n enc_combined_tt = enc_combined_tt / torch.max(enc_combined_tt)\n \n data_dict = {\n \"enc_data\":enc_combined_vals,\n \"enc_mask\":enc_combined_mask,\n \"enc_time_steps\":enc_combined_tt,\n \"data\": combined_vals, \n \"time_steps\": combined_tt,\n \"mask\": combined_mask,\n \"labels\": combined_labels}\n\n data_dict = utils.split_and_subsample_batch(data_dict, args, data_type = data_type)\n return data_dict", "def collate_fn(self, batch):\n images, boxes, categories = [], [], []\n\n for b in batch:\n images.append(b['img'])\n boxes.append(b['box'])\n categories.append(b['category'])\n\n images = torch.stack(images, dim=0)\n\n # tensor (N, 3, 300, 300), 3 lists of N tensors each\n return {\n 'imgs': images,\n 'boxes': boxes,\n 'categories': categories\n }", "def collate_fn(batch):\n file = [item[\"file\"] for item in batch]\n wave = torch.cat([item[\"wave\"] for item in batch], dim=0)\n return {\"file\": file, \"wave\": wave}", "def collate_fn(batch):\n file = [item[\"file\"] for item in batch]\n wave = torch.cat([item[\"wave\"] for item in batch], dim=0)\n return {\"file\": file, \"wave\": wave}", "def batchify(self, i, iterator):\n print(f'Starting Batch {i}')\n iterator = [item.strip() for item in iterator]\n max_length = self.max_seq_length - 2 # for special tokens\n\n batches = []\n n = len(iterator)\n sentence_count = 0\n 
index_start = 0\n index_stop = 0\n\n while index_stop < n:\n if (len(self.tokenizer.encode(' '.join(iterator[index_start:index_stop+1])).tokens) < max_length):\n index_start += 1\n index_stop += 1\n while (len(self.tokenizer.encode(' '.join(iterator[index_start:index_stop+1])).tokens) < max_length) and (index_stop<n):\n index_stop += 1\n batches.append(iterator[index_start:index_stop])\n index_start = index_stop\n print(f'Batch {i} Done')\n return batches", "def batchify_cache(fn, chunk):\n if chunk is None:\n return fn\n\n def ret(inputs, training = False):\n\n ret_list = [fn(inputs[i:i+chunk], training=training) for i in range(0, int(inputs.shape[0]), chunk)]\n\n return tf.concat([ret for ret in ret_list], 0)\n return ret", "def batch_collate_fn(batch):\n images = []\n masks = []\n \n for (image, trimap, mask) in batch:\n mask = mask.unsqueeze(0)\n trimap = trimap.unsqueeze(0)\n image = torch.cat([image, trimap], 0).unsqueeze(0)\n \n images.append(image)\n masks.append(mask)\n\n images = torch.cat(images, 0)\n masks = torch.cat(masks, 0)\n\n return (images, masks)", "def collate_fn(batch):\n text = [item[0] for item in batch]\n audio = [item[1] for item in batch]\n\n text_lengths = [len(x) for x in text]\n audio_lengths = [len(x) for x in audio]\n\n max_text = max(text_lengths)\n max_audio = max(audio_lengths)\n\n text_batch = np.stack(pad_text(x, max_text) for x in text)\n audio_batch = np.stack(pad_spectrogram(x, max_audio) for x in audio)\n\n return (torch.LongTensor(text_batch),\n torch.FloatTensor(audio_batch).permute(1, 0, 2),\n text_lengths, audio_lengths)", "def batch(self, sql):\n return _Batch(self.conn, sql)", "def shatter_batch(self, batch):\n return [tuple([elem[i] for elem in batch])\n for i in range(batch.size)]", "def multibatch_generic_csv_generator():\n\n def _multibatch_generic_csv_generator(\n data_path: str,\n start_date: Optional[datetime.datetime] = None,\n num_event_batches: Optional[int] = 20,\n num_events_per_batch: Optional[int] = 5,\n ) -> List[str]:\n\n if start_date is None:\n start_date = datetime.datetime(2000, 1, 1)\n\n file_list = []\n category_strings = {\n 0: \"category0\",\n 1: \"category1\",\n 2: \"category2\",\n 3: \"category3\",\n 4: \"category4\",\n 5: \"category5\",\n 6: \"category6\",\n }\n for batch_num in range(num_event_batches):\n # generate a dataframe with multiple column types\n batch_start_date = start_date + datetime.timedelta(\n days=(batch_num * num_events_per_batch)\n )\n # TODO: AJB 20210416 Add more column types\n df = pd.DataFrame(\n {\n \"event_date\": [\n (batch_start_date + datetime.timedelta(days=i)).strftime(\n \"%Y-%m-%d\"\n )\n for i in range(num_events_per_batch)\n ],\n \"batch_num\": [batch_num + 1 for _ in range(num_events_per_batch)],\n \"string_cardinality_3\": [\n category_strings[i % 3] for i in range(num_events_per_batch)\n ],\n }\n )\n filename = f\"csv_batch_{batch_num + 1:03}_of_{num_event_batches:03}.csv\"\n file_list.append(filename)\n df.to_csv(\n os.path.join(data_path, filename),\n index_label=\"intra_batch_index\",\n )\n\n return file_list\n\n return _multibatch_generic_csv_generator", "def _msdd_train_collate_fn(self, batch):\n packed_batch = list(zip(*batch))\n features, feature_length, ms_seg_timestamps, ms_seg_counts, clus_label_index, scale_mapping, targets = packed_batch\n features_list, feature_length_list = [], []\n ms_seg_timestamps_list, ms_seg_counts_list, scale_clus_label_list, scale_mapping_list, targets_list = (\n [],\n [],\n [],\n [],\n [],\n )\n\n max_raw_feat_len = max([x.shape[0] for x in 
features])\n max_target_len = max([x.shape[0] for x in targets])\n max_total_seg_len = max([x.shape[0] for x in clus_label_index])\n\n for feat, feat_len, ms_seg_ts, ms_seg_ct, scale_clus, scl_map, tgt in batch:\n seq_len = tgt.shape[0]\n pad_feat = (0, max_raw_feat_len - feat_len)\n pad_tgt = (0, 0, 0, max_target_len - seq_len)\n pad_sm = (0, max_target_len - seq_len)\n pad_ts = (0, 0, 0, max_target_len - seq_len)\n pad_sc = (0, max_total_seg_len - scale_clus.shape[0])\n padded_feat = torch.nn.functional.pad(feat, pad_feat)\n padded_tgt = torch.nn.functional.pad(tgt, pad_tgt)\n padded_sm = torch.nn.functional.pad(scl_map, pad_sm)\n padded_ms_seg_ts = torch.nn.functional.pad(ms_seg_ts, pad_ts)\n padded_scale_clus = torch.nn.functional.pad(scale_clus, pad_sc)\n\n features_list.append(padded_feat)\n feature_length_list.append(feat_len.clone().detach())\n ms_seg_timestamps_list.append(padded_ms_seg_ts)\n ms_seg_counts_list.append(ms_seg_ct.clone().detach())\n scale_clus_label_list.append(padded_scale_clus)\n scale_mapping_list.append(padded_sm)\n targets_list.append(padded_tgt)\n\n features = torch.stack(features_list)\n feature_length = torch.stack(feature_length_list)\n ms_seg_timestamps = torch.stack(ms_seg_timestamps_list)\n clus_label_index = torch.stack(scale_clus_label_list)\n ms_seg_counts = torch.stack(ms_seg_counts_list)\n scale_mapping = torch.stack(scale_mapping_list)\n targets = torch.stack(targets_list)\n return features, feature_length, ms_seg_timestamps, ms_seg_counts, clus_label_index, scale_mapping, targets", "def collate_fn(self, batch):\n images = list()\n targets = list()\n\n for b in batch:\n images.append(b[0])\n targets.append(b[1])\n\n # images = torch.stack(images, dim=0)\n\n return images, targets # tensor (N, 3, 300, 300), 3 lists of N tensors each", "def batchify(data, bsz, shuffle=True):\r\n if shuffle: np.random.shuffle(data)\r\n batched_data = []\r\n for i in range(len(data)):\r\n if i % bsz == 0:\r\n batched_data.append([data[i]])\r\n else:\r\n batched_data[len(batched_data) - 1].append(data[i])\r\n if len(batched_data[-1])==1: return batched_data[:-1]\r\n return batched_data", "def batch_gen():\n i = 0\n while len(all_sentences) - i >= batch_size:\n # TODO this is a mess...\n yield np.stack([\n np.pad(\n np.stack(\n [embeddings[id]\n for id in sentence[:max_sentence_length]]), [[\n 0, max_sentence_length -\n min(len(sentence), max_sentence_length)\n ], [0, 0]],\n 'constant',\n constant_values=0)\n for sentence in all_sentences[i:i + batch_size]\n ])\n\n i += batch_size", "def _one_mini_batch(self, data, indices, pad_id):\n batch_data = {'raw_data': [data[i] for i in indices],\n 'question_token_ids': [],\n 'question_length': [],\n 'passage_token_ids': [],\n 'passage_length': [],\n 'start_id': [],\n 'end_id': []}\n max_passage_num = max([len(sample['passages']) for sample in batch_data['raw_data']])\n max_passage_num = min(self.max_p_num, max_passage_num)\n for sidx, sample in enumerate(batch_data['raw_data']):\n for pidx in range(max_passage_num):\n if pidx < len(sample['passages']):\n batch_data['question_token_ids'].append(sample['question_token_ids'])\n batch_data['question_length'].append(len(sample['question_token_ids']))\n passage_token_ids = sample['passages'][pidx]['passage_token_ids']\n batch_data['passage_token_ids'].append(passage_token_ids)\n batch_data['passage_length'].append(min(len(passage_token_ids), self.max_p_len))\n else:\n batch_data['question_token_ids'].append([])\n batch_data['question_length'].append(0)\n 
batch_data['passage_token_ids'].append([])\n batch_data['passage_length'].append(0)\n batch_data, padded_p_len, padded_q_len = self._dynamic_padding(batch_data, pad_id)\n for sample in batch_data['raw_data']:\n if 'answer_passages' in sample and len(sample['answer_passages']):\n gold_passage_offset = padded_p_len * sample['answer_passages'][0]\n batch_data['start_id'].append(gold_passage_offset + sample['answer_spans'][0][0])\n batch_data['end_id'].append(gold_passage_offset + sample['answer_spans'][0][1])\n else:\n # fake span for some samples, only valid for testing\n batch_data['start_id'].append(0)\n batch_data['end_id'].append(0)\n return batch_data", "def batch_generator(data_frame_encoded):\n labels = data_frame_encoded[-1]\n # data = np.delete(data_frame_encoded, -1, axis=0)\n data = data_frame_encoded[:-1]\n\n num_features = len(data)\n num_batches = len(data[0])\n for i in range(num_batches):\n batch_compiled = []\n for j in range(num_features):\n if type(data[j][i]) is np.ndarray:\n batch_compiled.extend(data[j][i])\n else:\n batch_compiled.extend([data[j][i]])\n yield batch_compiled, labels[i]", "def graph_collate(batch):\n elem = batch[0]\n if isinstance(elem, Data):\n batch = Batch.from_data_list(batch)\n return batch, batch.y", "def collate_fn(batch):\n sentence1 = [item[0] for item in batch]\n sentence2 = [item[1] for item in batch]\n label = [item[2] for item in batch]\n label = torch.tensor(label)\n return sentence1, sentence2, label", "def unbatch():\n\n def _apply_fn(dataset):\n return dataset.unbatch()\n\n return _apply_fn", "def postprocessing(batch, vocab):\n\n return batch", "def _batchify(self, data_containers: Dict, batch_size):\n\n X = Variable(torch.LongTensor(data_containers['X'])).to(self.device)\n Y = Variable(torch.FloatTensor(data_containers['Y'])).to(self.device)\n\n data_size = X.size()[0]\n num_batches = data_size // batch_size\n\n return [\n (X[bi * batch_size: (bi + 1) * batch_size],\n Y[bi * batch_size: (bi + 1) * batch_size].unsqueeze(1))\n for bi in range(num_batches + 1)\n ]", "def pack_data_into_batches(self, ids):\n\n # create buckets sorted by the number of src tokens\n # each bucket is also sorted by the number of tgt tokens\n buckets = {}\n for i, line_ids in enumerate(ids):\n len_ = len(line_ids)\n if len_ not in buckets:\n buckets[len_] = [i]\n else:\n buckets[len_].append(i)\n\n for b_idx in buckets:\n buckets[b_idx] = sorted(buckets[b_idx])\n\n buckets = OrderedDict(sorted(buckets.items()))\n\n batches = []\n batch_elem_lengths = []\n curr_batch = []\n len_of_longest_sent = 0\n for sent_len, bucket in buckets.items():\n for sent_i in bucket:\n if sent_len * (len(curr_batch) + 1) > self.tokens_in_batch:\n if not curr_batch:\n raise ValueError(\n f\"The limitation on number of tokens in batch {self.tokens_in_batch} is too strong.\"\n f\"Several sentences contain {sent_len} tokens.\"\n )\n batches.append(curr_batch)\n batch_elem_lengths.append(sent_len)\n curr_batch = []\n curr_batch.append(sent_i)\n len_of_longest_sent = sent_len\n if curr_batch:\n batches.append(curr_batch)\n batch_elem_lengths.append(len_of_longest_sent)\n return batches, batch_elem_lengths", "def batch_apply(self, batch, is_train=False, stats=None, **kwargs):\n if self.max_context == 0:\n return batch\n trf_batch = []\n doc = {}\n doc[\"src\"] = []\n doc[\"tgt\"] = []\n doc[\"indices\"] = 0\n\n for ex, _, cid in batch:\n if ex[\"tgt\"] is not None:\n cur_len = max(len(doc[\"src\"] + ex[\"src\"]), len(doc[\"tgt\"] + ex[\"tgt\"]))\n\n if len(ex[\"src\"]) == 0 and 
len(ex[\"tgt\"]) == 0:\n # doc break we add it, restart new doc\n trf_batch.append((doc, self, cid))\n doc = {}\n doc[\"src\"] = []\n doc[\"tgt\"] = []\n doc[\"indices\"] = ex[\"indices\"]\n elif cur_len > self.doc_length:\n if len(doc[\"src\"]) == 0:\n # case 1st ex is already longer\n trf_batch.append((ex, self, cid))\n else:\n # adding cur ex is too long we add cur doc\n # and reset doc to cur ex\n trf_batch.append((doc, self, cid))\n doc = copy.deepcopy(ex)\n else:\n if len(doc[\"src\"]) == 0:\n # we start the new doc with cur ex\n doc = copy.deepcopy(ex)\n else:\n # we cumulate cur ex to cur doc\n doc[\"src\"] += [DefaultTokens.SEP] + ex[\"src\"]\n doc[\"src_original\"] += [DefaultTokens.SEP] + ex[\"src_original\"]\n doc[\"tgt\"] += [DefaultTokens.SEP] + ex[\"tgt\"]\n doc[\"tgt_original\"] += [DefaultTokens.SEP] + ex[\"tgt_original\"]\n nb_ctx = doc[\"src\"].count(DefaultTokens.SEP)\n if nb_ctx >= self.max_context:\n trf_batch.append((doc, self, cid))\n doc = {}\n doc[\"src\"] = []\n doc[\"tgt\"] = []\n doc[\"indices\"] = ex[\"indices\"]\n else:\n cur_len = len(doc[\"src\"] + ex[\"src\"])\n doc[\"tgt\"] = None\n if len(ex[\"src\"]) == 0:\n trf_batch.append((doc, self, cid))\n doc = {}\n doc[\"src\"] = []\n doc[\"indices\"] = ex[\"indices\"]\n elif cur_len > self.doc_length:\n if len(doc[\"src\"]) == 0:\n trf_batch.append((ex, self, cid))\n else:\n trf_batch.append((doc, self, cid))\n doc = copy.deepcopy(ex)\n else:\n if len(doc[\"src\"]) == 0:\n doc = copy.deepcopy(ex)\n else:\n doc[\"src\"] += [DefaultTokens.SEP] + ex[\"src\"]\n doc[\"src_original\"] += [DefaultTokens.SEP] + ex[\"src_original\"]\n nb_ctx = doc[\"src\"].count(DefaultTokens.SEP)\n if nb_ctx >= self.max_context:\n trf_batch.append((doc, self, cid))\n doc = {}\n doc[\"src\"] = []\n doc[\"indices\"] = ex[\"indices\"]\n if len(doc[\"src\"]) > 0:\n trf_batch.append((doc, self, cid))\n return trf_batch", "def make_column_transformer(*transformers, remainder=..., sparse_threshold=..., n_jobs=..., verbose=...):\n ...", "def collator(self, batch):\n\n # Retrieve data from batch\n ids = [item[\"ids\"] for item in batch]\n label = [item[\"label\"] for item in batch]\n\n # Sort the list\n ids, label = map(\n list,\n zip(\n *sorted(\n zip(ids, label), key=lambda _tuple: len(_tuple[0]), reverse=True,\n )\n ),\n )\n\n max_len = len(ids[0])\n\n # Initialize seq len list\n text_lengths = []\n new_ids = []\n for id in ids:\n\n _len = len(id)\n pad_len = max_len - _len\n\n if pad_len < 0:\n id = id[:max_len]\n else:\n id = np.pad(\n id, (0, pad_len), \"constant\", constant_values=self.pad_id\n ).tolist()\n\n new_ids.append(id)\n\n text_lengths.append(_len if _len < max_len else max_len)\n\n label = torch.tensor(label)\n text_lengths = torch.tensor(text_lengths)\n text = np.stack(new_ids)\n text = torch.from_numpy(text)\n\n return {\"label\": label, \"text_lengths\": text_lengths, \"text\": text}", "def tranform_data(args):\n data_dir = args.data_dir\n save_dir = args.save_dir\n os.makedirs(save_dir)\n maxsize = args.maxsize\n maxlen = args.maxlen \n ext = args.ext\n datetime = args.datetime\n classes = set()\n nb_files = 0\n time_col = 'time'\n event_col = 'event'\n gt_ids = None\n if args.ext == \"pkl\":\n with open(Path(args.data_dir, \"fx_labels\"), \"rb\") as fp:\n gt_ids = pickle.load(fp)[:maxsize]\n labels = np.unique(gt_ids)\n gt_data = []\n for i in range (len(gt_ids)):\n gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))\n gt = {'cluster_id': gt_data}\n print(gt_data)\n gt_table = pd.DataFrame(data=gt)\n 
gt_table.to_csv(Path(save_dir, 'clusters.csv'))\n if Path(args.data_dir, 'clusters.csv').exists():\n gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:(maxsize)]\n gt_ids.to_csv(Path(save_dir, 'clusters.csv'))", "def post_process_wrapper(cls: Type[T]) -> Type[T]:\n\n class _Wrapper(cls):\n def __init__(self, table, *additional_tables, **kwargs):\n postprocessors = kwargs.pop(\"postprocessors\", dict())\n if not hasattr(postprocessors, \"get\"):\n postprocessors = {0: postprocessors}\n for key, value in list(postprocessors.items()):\n value = tuple(\n alias_factory_subclass_from_arg(PostProcessor, postprocessor)\n for postprocessor in value\n )\n postprocessors[key] = value\n self.postprocessors = postprocessors\n postprocess_axis = kwargs.pop(\"postprocess_axis\", -1)\n if not hasattr(postprocess_axis, \"__len__\"):\n postprocess_axis = (postprocess_axis,)\n if not hasattr(postprocess_axis, \"get\"):\n post_dict = dict()\n for key in postprocessors:\n post_dict[key] = postprocess_axis\n postprocess_axis = post_dict\n self.postprocess_axis = postprocess_axis\n super(_Wrapper, self).__init__(table, *additional_tables, **kwargs)\n\n def batch_generator(self, repeat=False):\n subsamples = self.num_sub != 1\n for batch in super(_Wrapper, self).batch_generator(repeat=repeat):\n if subsamples:\n cur_batch = []\n for sub_batch_idx, sub_batch in enumerate(batch):\n for postprocessor, axis in zip(\n self.postprocessors.get(sub_batch_idx, tuple()),\n cycle(self.postprocess_axis.get(sub_batch_idx, tuple())),\n ):\n sub_batch = postprocessor.apply(\n sub_batch, axis=axis, in_place=True\n )\n cur_batch.append(sub_batch)\n yield tuple(cur_batch)\n else:\n for postprocessor, axis in zip(\n self.postprocessors[0], cycle(self.postprocess_axis[0])\n ):\n batch = postprocessor.apply(batch, axis=axis, in_place=True)\n yield batch\n\n _Wrapper.__doc__ = cls.__doc__ + post_process_wrapper.WRAPPED_DATA_DOC\n return _Wrapper", "def process_state_batch(self, batch):\n # batch = np.squeeze(batch, axis=1)\n batch = np.array([np.concatenate(obs, axis=-1) for obs in batch])\n return batch", "def encode_dataset(batch_size,downscale_factor,dataset, pooling_function):\n \n n,l=np.shape(dataset)\n f=downscale_factor\n n_batches=n//batch_size\n batches=np.linspace(1,n_batches,n_batches, dtype=int) * batch_size\n\n gaf = GramianAngularField(image_size=1., method='summation')\n \n print('Encoding started...')\n for p in range(n_batches):\n if p==0:\n X_gaf = gaf.transform(dataset[0:batches[p],:])\n sample=block_reduce(X_gaf[0], block_size=(f, f), func=pooling_function)\n l_red = sample.shape[0]\n X_gaf_red = np.zeros((n,l_red,l_red))\n print('output 3D Matrix shape: ', np.shape(X_gaf_red))\n\n j=0\n for i in range(0,batches[p]):\n X_gaf_red[i] = block_reduce(X_gaf[j], block_size=(f, f) , func=pooling_function)\n j+=1\n\n else: \n X_gaf = gaf.transform(X[batches[p-1]:batches[p],:])\n\n j=0\n for i in range(batches[p-1],batches[p]):\n X_gaf_red[i] = block_reduce(X_gaf[j], block_size=(f, f) , func=pooling_function)\n j+=1\n \n print('Encoding successful!')\n print('#####################################')\n \n return X_gaf_red", "def collate_fn(batch: list[dict[str, Tensor]]) -> dict[str, Any]:\n output: dict[str, Any] = {}\n output[\"image\"] = torch.stack([sample[\"image\"] for sample in batch])\n output[\"boxes\"] = [sample[\"boxes\"] for sample in batch]\n output[\"labels\"] = [torch.tensor([1] * len(sample[\"boxes\"])) for sample in batch]\n return output", "def batch_split(self, batch_text, threads=8):\n pass", 
"def collate_fn(sample_list):\n x_ref_batch = []\n x_pos_batch = []\n x_negs_batch = []\n label_batch = []\n\n for sample in sample_list:\n x_ref_batch.append(sample[\"x_ref\"])\n x_pos_batch.append(sample[\"x_pos\"])\n x_negs_batch.append(sample[\"x_negs\"])\n label_batch.append(sample[\"label\"])\n\n # Use torch API for RNNs to pad samples to fixed length, L, and stack them in batch-tensor of dim (B,n_dim,L).\n x_ref_batch = pad_sequence(\n x_ref_batch,\n batch_first=True,\n padding_value=0) # (B,L,n_dim)\n x_ref_batch = x_ref_batch.transpose(1, 2) # (B,n_dim,L)\n\n x_pos_batch = pad_sequence(\n x_pos_batch,\n batch_first=True,\n padding_value=0) # (B,L,n_dim)\n x_pos_batch = x_pos_batch.transpose(1, 2) # (B,n_dim,L)\n\n # Pad neg tensors with varying length of first dim L, and produce batch (B,K,n_dim,L') where L' is padded length\n x_negs_batch = pad_sequence(x_negs_batch,\n batch_first=True,\n padding_value=0) # (B, L', K, n_dim)\n x_negs_batch = x_negs_batch.transpose(1, 2) # (B, K, L', n_dim)\n x_negs_batch = x_negs_batch.transpose(2, 3) # (B, K, n_dim, L')\n\n return {\n 'x_ref': x_ref_batch,\n 'x_pos': x_pos_batch,\n 'x_negs': x_negs_batch,\n 'label': label_batch\n }", "def batchify(batch):\n\n\tquestion_len = list()\n\tlabel_list = list()\n\tfor ex in batch:\n\t\tquestion_len.append(len(ex[0]))\n\t\tlabel_list.append(ex[1])\n\n\t'''\n\tPadding the labels - unequal length sequences for sequenial data like we have. \n\tSince actual labels are 0/1 - we pad with -1, and will use this when 'masking' labels during loss and\n\taccuracy evaluation.\n\t'''\n\ttarget_labels = torch.nn.utils.rnn.pad_sequence([torch.tensor(y) for y in label_list], padding_value=-1).t()\n\n\t# dimension is dimension of every feature vector = n_guesses in this homework setting\n\tdim = batch[0][0].shape[1]\n\n\t# similar padding happens for the feature vectors, with vector of all zeros appended.\n\tx1 = torch.FloatTensor(len(question_len), max(question_len), dim).zero_()\n\tfor i in range(len(question_len)):\n\t\tquestion_feature_vec = batch[i][0]\n\t\tvec = torch.FloatTensor(question_feature_vec)\n\t\tx1[i, :len(question_feature_vec)].copy_(vec)\n\tq_batch = {'feature_vec': x1, 'len': torch.FloatTensor(question_len), 'labels': target_labels}\n\treturn q_batch", "def coco_collate_fn(batch):\n all_imgs, all_objs, all_boxes, all_masks, all_obj_to_img = [], [], [], [], []\n\n for i, (img, objs, boxes, masks) in enumerate(batch):\n all_imgs.append(img[None])\n O = objs.size(0)\n all_objs.append(objs)\n all_boxes.append(boxes)\n all_masks.append(masks)\n\n all_obj_to_img.append(torch.LongTensor(O).fill_(i))\n\n all_imgs = torch.cat(all_imgs)\n all_objs = torch.cat(all_objs)\n all_boxes = torch.cat(all_boxes)\n all_masks = torch.cat(all_masks)\n all_obj_to_img = torch.cat(all_obj_to_img)\n\n out = (all_imgs, all_objs, all_boxes, all_masks, all_obj_to_img)\n\n return out", "def _get_batch_data(self, batch):\n try:\n encoders = [ encoder for encoder in self._data_encoder ]\n except:\n encoders = (self._data_encoder,)\n\n try:\n data_batches = [ encoder.transform_batch(rec for _, rec in batch.iterrows())\n for encoder in encoders ]\n except AttributeError:\n data_batches = [\n [ self._get_data(record, encoder) for _, record in batch.iterrows() ]\n for encoder in encoders ]\n\n try:\n batches = [ np.array(encoder.finalize_batch(batch))\n for encoder, batch in zip(encoders, data_batches)]\n except AttributeError:\n batches = [ np.array(batch) for batch in data_batches ]\n\n return batches if len(batches) > 1 else 
batches[0]", "def variable_time_collate_fn(batch, args, device = torch.device(\"cpu\"), data_type = \"train\", \n\tdata_min = None, data_max = None):\n\tD = batch[0][2].shape[1]\n\tcombined_tt, inverse_indices = torch.unique(torch.cat([ex[1] for ex in batch]), sorted=True, return_inverse=True)\n\tcombined_tt = combined_tt.to(device)\n\n\toffset = 0\n\tcombined_vals = torch.zeros([len(batch), len(combined_tt), D]).to(device)\n\tcombined_mask = torch.zeros([len(batch), len(combined_tt), D]).to(device)\n\t\n\tcombined_labels = None\n\tN_labels = 1\n\n\tcombined_labels = torch.zeros(len(batch), N_labels) + torch.tensor(float('nan'))\n\tcombined_labels = combined_labels.to(device = device)\n\t\n\tfor b, (record_id, tt, vals, mask, labels) in enumerate(batch):\n\t\ttt = tt.to(device)\n\t\tvals = vals.to(device)\n\t\tmask = mask.to(device)\n\t\tif labels is not None:\n\t\t\tlabels = labels.to(device)\n\n\t\tindices = inverse_indices[offset:offset + len(tt)]\n\t\toffset += len(tt)\n\n\t\tcombined_vals[b, indices] = vals\n\t\tcombined_mask[b, indices] = mask\n\n\t\tif labels is not None:\n\t\t\tcombined_labels[b] = labels\n\n\tcombined_vals, _, _ = utils.normalize_masked_data(combined_vals, combined_mask, \n\t\tatt_min = data_min, att_max = data_max)\n\n\tif torch.max(combined_tt) != 0.:\n\t\tcombined_tt = combined_tt / torch.max(combined_tt)\n\t\t\n\tdata_dict = {\n\t\t\"data\": combined_vals, \n\t\t\"time_steps\": combined_tt,\n\t\t\"mask\": combined_mask,\n\t\t\"labels\": combined_labels}\n\n\tdata_dict = utils.split_and_subsample_batch(data_dict, args, data_type = data_type)\n\treturn data_dict", "def preprocess(self, inputs: InputsType, batch_size: int = 1, **kwargs):\n chunked_data = self._get_chunk_data(\n map(self.pipeline, inputs), batch_size)\n yield from map(self.collate_fn, chunked_data)", "def batchify(data, batch_size):\n n_batch = data.shape[0] // batch_size\n data = data[:n_batch * batch_size]\n data = data.reshape((batch_size, n_batch)).T\n return data", "def make_block(self, block_ix, downsample, ker_size, block_len):\n stride = int(downsample) + 1\n n_in_filters = self.filters[block_ix]\n n_filters = self.filters[block_ix+1]\n mult_fact = 1 if block_ix == 0 else 6\n\n block = [MBConv(n_in_filters, n_filters, ker_size, stride, mult_fact)]\n block += [MBConv(n_filters, n_filters, ker_size, 1, mult_fact) for _ in range(block_len-1)]\n return block", "def bucketed_next(self):\n # Initialize batch containers\n label_batch = list()\n enc_input_batch = list()\n dec_input_batch = list()\n if self.bucket_id < self.opt.num_buckets:\n # Fill individual batches by iterating over bucket contents\n while len(enc_input_batch) < self.opt.batch_size:\n try:\n indexed_sent = self.data[self.bucket_id][self.sent_id]\n label_item = indexed_sent[1:]\n enc_input_item = indexed_sent[1:]\n # Reverse the input to the encoder, see arxiv.org/pdf/1703.03906.pdf\n enc_input_item.reverse()\n dec_input_item = indexed_sent[:-1]\n label_batch.append(label_item)\n enc_input_batch.append(enc_input_item)\n dec_input_batch.append(dec_input_item)\n self.sent_id += 1\n except IndexError:\n # Finish batch prematurely if current bucket has been exhausted, i.e. 
no mixed-bucket batches\n self.sent_id = 0\n self.bucket_id += 1\n break\n # Check if bucket is empty, to prevent empty batches from being generated\n try:\n if self.sent_id == len(self.data[self.bucket_id]):\n self.bucket_id += 1\n except IndexError:\n pass\n else:\n raise IndexError\n return label_batch, enc_input_batch, dec_input_batch", "def _fillBatches(self):\n\n batchRE = r\"\"\"\n B\n (?P<observebatch>\\d+?)\n (?P<startend>[SE])\n (?P<sequence>\\d+?)\n _SR\n (?:_(?P<extraInjections>\\d+?|\\w+?))?\n $\n \"\"\"\n batchRE = re.compile(batchRE, re.VERBOSE)\n # We canot infer batches unless we have runorder\n if 'Run Order' in self.sampleMetadata.keys():\n currentBatch = 0\n # Loop over samples in run order\n for index, row in self.sampleMetadata.sort_values(by='Run Order').iterrows():\n nameComponents = batchRE.search(row['Sample File Name'])\n if nameComponents:\n # Batch start\n if nameComponents.group('startend') == 'S':\n # New batch - increment batch no\n if nameComponents.group('sequence') == '1':\n currentBatch = currentBatch + 1\n\n # Don't include the dilution series or blanks\n if not ((row['AssayRole'] == AssayRole.LinearityReference) or (row['SampleType'] == SampleType.ProceduralBlank)):\n self.sampleMetadata.loc[index, 'Batch'] = currentBatch\n self.sampleMetadata.loc[index, 'Correction Batch'] = currentBatch\n\n else:\n warnings.warn('Unable to infer batches without run order, skipping.')\n return", "def _collate_fn(batch):\n def _pad(seqs, dtype=torch.float32):\n \"\"\" Pads a batch of sequences of varying seq_len. \"\"\"\n assert len(seqs) > 0 and all(x.shape[1:] == seqs[0].shape[1:] for x in seqs)\n lens = torch.LongTensor([len(x) for x in seqs])\n max_seq_len = torch.max(lens)\n\n # padded_seq_dims: (batch, max_seq_len, ...).\n padded_seq_dims = (len(seqs), max_seq_len,) + seqs[0].shape[1:]\n res = torch.zeros(padded_seq_dims, dtype=dtype)\n for i, seq in enumerate(seqs):\n src_len = lens[i]\n res[i, :src_len] = torch.Tensor(seq)\n return res, lens\n\n assert all(len(x) == 2 for x in batch)\n # (1, batch, (seq_len, 68, 3))\n frames, captions = zip(*batch)\n\n # Merge sequences (from tuple of 1D tensor to 2D tensor)\n # (batch, seq_len, ...)\n src_seqs, src_lens = _pad(frames, dtype=torch.float32)\n tgt_seqs, tgt_lens = _pad(captions, dtype=torch.long)\n return src_seqs, src_lens, tgt_seqs, tgt_lens", "def _batchify(batch):\n im_name, im0, im1, im2, im3, im4, im5, im6, im7, im8, im9 = zip(*batch)\n im0 = nd.stack(*im0)\n im1 = nd.stack(*im1)\n im2 = nd.stack(*im2)\n im3 = nd.stack(*im3)\n im4 = nd.stack(*im4)\n im5 = nd.stack(*im5)\n im6 = nd.stack(*im6)\n im7 = nd.stack(*im7)\n im8 = nd.stack(*im8)\n im9 = nd.stack(*im9)\n return im_name, im0, im1, im2, im3, im4, im5, im6, im7, im8, im9", "def rearrange_batch(batch):\n return list(zip(*batch))", "def list_data_collate(batch):\n elem = batch[0]\n data = [i for k in batch for i in k] if isinstance(elem, list) else batch\n return default_collate(data)", "def default_collate(batch):\n elem_type = type(batch[0])\n if isinstance(batch[0], torch.Tensor):\n out = None\n if _use_shared_memory:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum([x.numel() for x in batch])\n storage = batch[0].storage()._new_shared(numel)\n out = batch[0].new(storage)\n return torch.stack(batch, 0, out=out)\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n elem = batch[0]\n if elem_type.__name__ == 
'ndarray':\n # array of string classes and object\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(error_msg_fmt.format(elem.dtype))\n\n return default_collate([torch.from_numpy(b) for b in batch])\n if elem.shape == (): # scalars\n py_type = float if elem.dtype.name.startswith('float') else int\n return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n elif isinstance(batch[0], float):\n return torch.tensor(batch, dtype=torch.float64)\n elif isinstance(batch[0], int_classes):\n return torch.tensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], container_abcs.Mapping): \n return {key: default_collate([d[key] for d in batch]) for key in batch[0]}\n elif isinstance(batch[0], tuple) and hasattr(batch[0], '_fields'): # namedtuple\n return type(batch[0])(*(default_collate(samples) for samples in zip(*batch)))\n elif isinstance(batch[0], container_abcs.Sequence):\n transposed = zip(*batch)\n return [default_collate(samples) for samples in transposed]\n\n raise TypeError((error_msg_fmt.format(type(batch[0]))))", "def serialize_batches():\n # a set of variables for the state of current batch which will be converted to Arrow\n # RecordBatch.\n pdfs = []\n state_pdfs = []\n pdf_data_cnt = 0\n state_data_cnt = 0\n\n return_schema = None\n\n for data in iterator:\n # data represents the result of each call of user function\n packaged_result = data[0]\n\n # There are two results from the call of user function:\n # 1) iterator of pandas DataFrame (output)\n # 2) updated state instance\n pdf_iter = packaged_result[0][0]\n state = packaged_result[0][1]\n\n # This is static and won't change across batches.\n return_schema = packaged_result[1]\n\n for pdf in pdf_iter:\n # We ignore empty pandas DataFrame.\n if len(pdf) > 0:\n pdf_data_cnt += len(pdf)\n pdfs.append(pdf)\n\n # If the total number of records in current batch exceeds the configured\n # threshold, time to construct the Arrow RecordBatch from the batch.\n if pdf_data_cnt > self.arrow_max_records_per_batch:\n batch = construct_record_batch(\n pdfs, pdf_data_cnt, return_schema, state_pdfs, state_data_cnt\n )\n\n # Reset the variables to start with new batch for further data.\n pdfs = []\n state_pdfs = []\n pdf_data_cnt = 0\n state_data_cnt = 0\n\n yield batch\n\n # This has to be performed 'after' evaluating all elements in iterator, so that\n # the user function has been completed and the state is guaranteed to be updated.\n state_pdf = construct_state_pdf(state)\n\n state_pdfs.append(state_pdf)\n state_data_cnt += 1\n\n # processed all output, but current batch may not be flushed yet.\n if pdf_data_cnt > 0 or state_data_cnt > 0:\n batch = construct_record_batch(\n pdfs, pdf_data_cnt, return_schema, state_pdfs, state_data_cnt\n )\n\n yield batch", "def collate_sentences(batch: List[Tuple]):\n # fill this list with all the labels in the batch\n batch_labels = []\n\n # we need to find the maximum length of a sentence in this batch\n max_len = 0\n for i in batch:\n if len(i[0]) > max_len:\n max_len = len(i[0])\n batch_size = len(batch)\n\n # print('batch size',batch_size)\n # initialize a Tensor filled with zeros (aka index of <PAD>)\n batch_sentences = torch.LongTensor(batch_size, max_len).fill_(0)\n\n # fill each row idx in batch_sentences with the corresponding\n # sequence tensor\n #\n # ... batch_sentences[idx, ...] 
= ...\n for idx in range(0, batch_size):\n # print(idx)\n # print(len(batch[idx][0]))\n # print(len(batch_sentences[idx]))\n batch_sentences[idx][0:len(batch[idx][0])] = batch[idx][0]\n print(batch[idx])\n batch_labels.append(batch[idx][1])\n # print(batch_sentences[idx])\n print(type(batch_labels))\n # batch_labels = [torch.LongTensor(x) for x in batch_labels]\n batch_labels = torch.tensor(batch_labels)\n # print(batch_labels)\n return batch_sentences, batch_labels", "def combine_batches(chosen_dict):\n\n batches = set(sorted(chosen_dict.keys())) - {'meta_data'}\n batches = sorted(list(batches))\n root_dict = dict()\n root_dict['data'] = chosen_dict[batches[0]]['data']\n root_dict['labels'] = chosen_dict[batches[0]]['labels']\n root_dict['filenames'] = chosen_dict[batches[0]]['filenames']\n root_dict['meta_data'] = chosen_dict['meta_data']\n root_dict['meta_data'].append(batches[0])\n\n for curr_batch in batches[1:]:\n temp_dict = chosen_dict[curr_batch]\n root_dict['data'] = np.concatenate((root_dict['data'],\n temp_dict['data']),\n axis=0)\n root_dict['labels'] = root_dict['labels'] + temp_dict['labels']\n root_dict['filenames'] = root_dict['filenames'] + temp_dict['filenames']\n root_dict['meta_data'].append(curr_batch)\n\n tot_rows = root_dict['data'].shape[0]\n new_order = range(tot_rows)\n for _ in range(5):\n shuffle(new_order)\n\n ub_dict = dict()\n ub_data = np.zeros((tot_rows, 3072), dtype=root_dict['data'].dtype)\n ub_labels = [0] * tot_rows\n ub_filenames = [\"\"] * tot_rows\n\n for ctr, idx in enumerate(new_order):\n ub_data[ctr, :] = root_dict['data'][idx, :]\n ub_labels[ctr] = root_dict['labels'][idx]\n ub_filenames[ctr] = root_dict['filenames'][idx]\n\n ub_dict['data'] = ub_data\n ub_dict['labels'] = ub_labels\n ub_dict['filenames'] = ub_filenames\n ub_dict['meta_data'] = root_dict['meta_data']\n\n return ub_dict", "def rel_to_batch(rel_batch_p,rel_index_batch_p,data_iterator,dicts,frame):\n if frame ==\"amr\":\n lemma_dict,amr_category_dict = dicts[\"lemma_dict\"], dicts[\"amr_category_dict\"]\n data = [torch.LongTensor([[amr_category_dict[uni.cat],lemma_dict[uni.le],0] for uni in uni_seq]) for uni_seq in rel_batch_p ]\n elif frame==\"dm\":\n target_pos_dict,cat_dict = dicts[\"dm_target_pos_dict\"], dicts[\"dm_cat_dict\"]\n data = [torch.LongTensor([[target_pos_dict[uni.pos],cat_dict[uni.cat],0] for uni in uni_seq]) for uni_seq in rel_batch_p ]\n elif frame ==\"psd\":\n psd_target_pos_dict,psd_sense_dict = dicts[\"psd_target_pos_dict\"], dicts[\"psd_sense_dict\"]\n data = [torch.LongTensor([[psd_target_pos_dict[uni.pos],psd_sense_dict[uni.sense],0] for uni in uni_seq]) for uni_seq in rel_batch_p ]\n else:\n raise NotImplementedError(\"{} is not supported\".format(frame))\n\n rel_index = [torch.LongTensor(index) for index in rel_index_batch_p]\n rel_batch,rel_index_batch,rel_lengths = data_iterator._batchify_rel_concept(data,rel_index)\n return MyPackedSequence(rel_batch,rel_lengths),rel_index_batch", "def __call__(self, batch):\r\n '''\r\n for i in range(len(batch)):\r\n if batch[i].shape[1] != 861:\r\n batch[i] = batch[i - 1]\r\n '''\r\n return torch.tensor(batch)#torch.stack(batch, dim = 0)\r", "def coco_collate_fn_inferece(vocab, batch):\n # batch is a list, and each element is (image, objs, boxes, triplets)\n all_imgs, all_boxes, all_triplets, all_triplet_type, all_source_edges = [], [], [], [], []\n all_objs = []\n all_masks = []\n all_image_ids = []\n\n max_triplets = 0\n max_objects = 0\n for i, (img, objs, boxes, triplets, triplet_type, source_edges, masks, 
image_id) in enumerate(batch):\n O = objs[list(objs.keys())[0]].size(0)\n T = triplets.size(0)\n\n if max_objects < O:\n max_objects = O\n\n if max_triplets < T:\n max_triplets = T\n\n for i, (img, objs, boxes, triplets, triplet_type, source_edges, masks, image_id) in enumerate(batch):\n all_imgs.append(img[None])\n all_image_ids.append(image_id)\n O, T = objs[list(objs.keys())[0]].size(0), triplets.size(0)\n\n # Padded objs\n attributes = list(objs.keys())\n sorted(attributes)\n attributes_to_index = {attributes[i]: i for i in range(len(attributes))}\n attributes_objects = torch.zeros(len(attributes), max_objects, dtype=torch.long)\n\n for k, v in objs.items():\n # Padded objects\n if max_objects - O > 0:\n zeros_v = torch.zeros(max_objects - O, dtype=torch.long)\n padd_v = torch.cat([v, zeros_v])\n else:\n padd_v = v\n attributes_objects[attributes_to_index[k], :] = padd_v\n attributes_objects = attributes_objects.transpose(1, 0)\n\n # Padded boxes\n if max_objects - O > 0:\n padded_boxes = torch.FloatTensor([[-1, -1, -1, -1]]).repeat(max_objects - O, 1)\n boxes = torch.cat([boxes, padded_boxes])\n\n # Padded masks\n if masks is not None and max_objects - O > 0:\n padded_masks = torch.zeros([max_objects - O, masks.size(1), masks.size(2)]).type(torch.LongTensor)\n masks = torch.cat([masks, padded_masks])\n\n # Padded triplets\n if max_triplets - T > 0:\n padded_triplets = torch.LongTensor([[0, vocab[\"pred_name_to_idx\"][\"__padding__\"], 0]]).repeat(\n max_triplets - T, 1)\n triplets = torch.cat([triplets, padded_triplets])\n triplet_type = torch.cat([triplet_type, torch.LongTensor([0]*(max_triplets - T))])\n source_edges = torch.cat([source_edges, torch.LongTensor([vocab[\"pred_name_to_idx\"][\"__padding__\"]]*(max_triplets - T))])\n\n all_objs.append(attributes_objects)\n all_boxes.append(boxes)\n all_triplets.append(triplets)\n if masks is not None:\n all_masks.append(masks)\n else:\n all_masks = None\n all_triplet_type.append(triplet_type)\n all_source_edges.append(source_edges)\n\n all_imgs = torch.cat(all_imgs)\n all_objs = torch.stack(all_objs, dim=0)\n all_boxes = torch.stack(all_boxes, dim=0)\n if all_masks is not None:\n all_masks = torch.stack(all_masks, dim=0)\n all_triplets = torch.stack(all_triplets, dim=0)\n # all_image_ids = torch.LongTensor(all_image_ids)\n all_triplet_type = torch.stack(all_triplet_type, dim=0)\n all_source_edges = torch.stack(all_source_edges, dim=0)\n all_image_ids = torch.LongTensor(all_image_ids)\n\n out = (all_imgs, all_objs, all_boxes, all_triplets, all_triplet_type, all_source_edges, all_masks, all_image_ids)\n return out", "def custom_collate_fn(data):\n features, labels = zip(*data)\n return pack_sequence(features, enforce_sorted=False), torch.tensor(labels)", "def batches(data, batch_size) -> list:\n rv = []\n for idx, line in enumerate(data):\n if idx != 0 and idx % batch_size == 0:\n yield rv\n rv = []\n rv.append(line)\n yield rv", "def unbucketed_next(self):\n # Initialize batch containers\n label_batch = list()\n enc_input_batch = list()\n dec_input_batch = list()\n # Fill individual batches by iterating over the entire data source\n if self.sent_id < self.get_length():\n while len(enc_input_batch) < self.opt.batch_size:\n try:\n indexed_sent = self.data[self.sent_id]\n label_item = indexed_sent[1:]\n enc_input_item = indexed_sent[1:]\n # Reverse the input to the encoder, see arxiv.org/pdf/1703.03906.pdf\n enc_input_item.reverse()\n dec_input_item = indexed_sent[:-1]\n label_batch.append(label_item)\n 
enc_input_batch.append(enc_input_item)\n dec_input_batch.append(dec_input_item)\n self.sent_id += 1\n except IndexError:\n break\n else:\n raise IndexError\n return label_batch, enc_input_batch, dec_input_batch", "def batchify(data, batch_size, args):\n # Work out how cleanly we can divide the dataset into batch_size parts (i.e. continuous seqs).\n nbatch = data.size(0) // batch_size\n # Trim off any extra elements that wouldn't cleanly fit (remainders).\n data = data.narrow(0, 0, nbatch * batch_size)\n # Evenly divide the data across the batch_size batches.\n data = data.view(batch_size, -1)\n if args.cuda:\n data = data.cuda()\n return data", "def make_batch(self, data):\n\n padding_size = self.window_size // 2\n padding = [self.PAD_IDX for i in six.moves.xrange(padding_size)]\n padding = self.xp.array(padding, dtype=self.xp.int32)\n data_num = len(data)\n ids = []\n boundaries = []\n i = 0\n i_char = 0\n ids.append(padding)\n\n for words in data:\n if self.char_level_flag:\n # Char-level (don't lowercase)\n ids.append(words)\n i_char += len(words)\n else:\n # Word-level\n ids.append(words)\n ids.append(padding)\n i += padding_size\n boundaries.append(i)\n i += len(words)\n boundaries.append(i)\n ids = self.xp.concatenate(ids)\n return ids, boundaries, data_num", "def collater(self, samples):\n\n return dual_collate(\n samples, pad_idx=self.d1.src_dict.pad(), eos_idx=self.d1.src_dict.eos(),\n left_pad_source=self.d1.left_pad_source, left_pad_target=self.d1.left_pad_target,\n input_feeding=self.d1.input_feeding,\n )\n\n #prev_output_tokens doesn't match!\n #id doesn't match\n #both of these keys are lengths 248 for both dictionaries\n #length only captures the first dimension of a multidimensional tensor\n #248 is likely the batch size here\n #error occurs because of the sorting by descending source length in the collate method\n #may be possible to fix by replace the sort_order line with: sort_order = torch.LongTensor(range(len(id)))\n #also it seems like there's more keys in c1 and c2 than we explicitly account for here \n #also fix DualSourceSequenceGenerator.generate\n\n indexes = [sample['id'] for sample in samples]\n\n c1 = self.d1.collater([self.d1[index] for index in indexes])\n c2 = self.d2.collater([self.d2[index] for index in indexes])\n\n # c1 = self.d1.collater([self.d1[sample['id']] for sample in samples])\n # c2 = self.d2.collater([self.d2[sample['id']] for sample in samples])\n\n net_input1 = c1['net_input']; net_input2 = c2['net_input']\n net_input = {}\n for key in net_input1.keys():\n if 'src_' in key:\n net_input[key+'1'] = net_input1[key]\n elif key == 'prev_output_tokens':\n net_input[key] = net_input1[key]\n # elif key == 'ntokens':\n # net_input[key] = net_input1[key]\n else:\n raise AssertionError\n for key in net_input2.keys():\n if 'src_' in key:\n net_input[key+'2'] = net_input2[key]\n elif key == 'prev_output_tokens':\n if self.dual_decoder:\n net_input[key+'_extra'] = net_input2[key]\n else:\n # net_input[key] = net_input2[key]\n pass\n # err = \"NET_INPUT ASSERTION: \"+str(len(indexes))+\";\\n\"\n # err += str(len(net_input[key])) + \"\\t\" + str(net_input[key]) + \"\\n\"\n # err += str(len(net_input2[key])) + \"\\t\" + str(net_input2[key]) + \"\\n\"\n # assert False, err\n # if not net_input[key] == net_input2[key]:\n # print(\"NET_INPUT ASSERTION:\")\n # print(net_input[key])\n # print(net_input2[key])\n # raise AssertionError\n else:\n raise AssertionError\n\n c = {'net_input': net_input}\n for key in c1.keys():\n if key == 'target':\n c[key] = c1[key]\n 
elif key == 'ntokens':\n c[key] = c1[key]\n elif key == 'id' or key == 'nsentences':\n c[key] = c1[key]\n else:\n assert key == 'net_input',key\n for key in c2.keys():\n if key == 'target':\n c[key] = c2[key]\n elif key == 'ntokens':\n if 'target' not in samples[0]:\n c[key] += c2[key] # source tokens\n elif self.dual_decoder:\n c[key+'_extra'] = c2[key] # target tokens for decoder 2\n else:\n assert c[key] == c2[key], \"NTOKENS:\\n\"+str(c[key])+\"\\n\"+str(c2[key]) # target tokens for decoder\n elif key == 'id':\n # set1 = set(c[key])\n # set2 = set(c2[key])\n # assert set1 == set2\n assert False, \"ID: lengths: \"+str(len(indexes))+\"; \"+str(len(c[key]))+\", \"+str(len(c2[key]))+\"\\n\"+str(c[key][:10])+\"...\\n\"+str(c2[key][:10])+\"...\\n\" \n assert c[key] == c2[key], \"ID:\\n\"+str(c[key])+\"\\n\"+str(c2[key])\n elif key == 'nsentences':\n assert c[key] == c2[key], \"NSENT:\\n\"+str(c[key])+\"\\n\"+str(c2[key])\n else:\n assert key == 'net_input',key\n return c\n\n\n\n net_input1['src_tokens1'] = net_input1.pop('src_tokens') \n net_input1['src_lengths1'] = net_input1.pop('src_lengths')\n net_input1['src_tokens2'] = net_input2['src_tokens'] \n net_input1['src_lengths2'] = net_input2['src_lengths']\n\n if self.dual_decoder:\n net_input1['prev_output_tokens_extra'] = net_input2['prev_output_tokens']\n c1['target_extra'] = c2['target']\n c1['ntokens_extra'] = c2['ntokens']\n if 'target' not in samples[0]:\n #ntokens and ntokens_extra represent the total number of source tokens\n c1['ntokens'] = c1['ntokens'] + c2['ntokens']\n if 'ntokens_extra' in c1:\n c1['ntokens_extra'] = c1['ntokens']\n #else ntokens is the total number of target tokens\n return c1", "def _batch_to_json(self, batch, lengths):\n outputs = []\n cursor = 0\n for length in lengths:\n cursor_end = cursor + length\n\n mini_batch = batch[cursor:cursor_end]\n outputs.append(self._to_json(mini_batch))\n\n cursor = cursor_end\n return outputs" ]
[ "0.7257751", "0.64775294", "0.62918323", "0.62413275", "0.62413275", "0.62413275", "0.6130379", "0.6129091", "0.60867256", "0.6059193", "0.604119", "0.6024431", "0.60000926", "0.59829533", "0.59790826", "0.5951112", "0.590438", "0.5853374", "0.5758594", "0.5754777", "0.5751464", "0.57447696", "0.5739473", "0.5732152", "0.56916827", "0.56863946", "0.5656825", "0.56498754", "0.56425536", "0.5573121", "0.5573121", "0.5571393", "0.55662763", "0.5528734", "0.5517851", "0.55157787", "0.5481902", "0.5481167", "0.5457812", "0.5444077", "0.5443196", "0.54399604", "0.54391795", "0.54391795", "0.5419391", "0.5410765", "0.5405398", "0.5387215", "0.5386938", "0.53843814", "0.5372356", "0.5367777", "0.5366125", "0.53617615", "0.5355494", "0.5344551", "0.5344221", "0.5339525", "0.53380793", "0.53342754", "0.53321886", "0.5330868", "0.5317701", "0.53155476", "0.5313997", "0.5309912", "0.53028667", "0.53001976", "0.52999365", "0.5292439", "0.5289355", "0.52893543", "0.5277489", "0.52683043", "0.5258198", "0.5255203", "0.5252494", "0.5251329", "0.52429944", "0.5223662", "0.52228385", "0.52171826", "0.52161866", "0.5204579", "0.5203306", "0.5202525", "0.5199012", "0.51977515", "0.51821184", "0.51810056", "0.51798093", "0.5179626", "0.5177567", "0.51656306", "0.5160457", "0.51601243", "0.51453775", "0.5145043", "0.5140716", "0.5138031" ]
0.55312043
33
This is the reverse of `self.collate_fn`
def shatter_batch(self, batch):
        return [tuple([elem[i] for elem in batch])
                for i in range(batch.size)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collate_fn(self, *args):\n return TupleMiniBatch(default_collate(*args))", "def build_collate_fn(\n cls, args: argparse.Namespace, train: bool\n ) -> Callable[[Sequence[Dict[str, np.ndarray]]], Dict[str, torch.Tensor]]:\n raise NotImplementedError", "def custom_collate_fn(data):\n features, labels = zip(*data)\n return pack_sequence(features, enforce_sorted=False), torch.tensor(labels)", "def _init_collate(self, cfg: ConfigType) -> Callable:\n try:\n with FUNCTIONS.switch_scope_and_registry(self.scope) as registry:\n collate_fn = registry.get(cfg.test_dataloader.collate_fn)\n except AttributeError:\n collate_fn = pseudo_collate\n return collate_fn # type: ignore", "def clevr_collate_fn(data):\n\tdata = sorted(data, key=lambda x: len(x[1]), reverse=True)\n\timg, q, len_q, a, f, idx = list(zip(*data))\n\tq = torch.nn.utils.rnn.pad_sequence(q, batch_first=True)\n\treturn torch.stack(img), q, list(len_q), torch.stack(a), list(f), list(idx)", "def regular_collate_fn(data):\n\timg, box, q, a = list(zip(*data))\n\tq = torch.nn.utils.rnn.pad_sequence(q, batch_first=True)\n\treturn torch.stack(img), torch.stack(box), q, torch.stack(a).long()", "def _collate_fn(batch):\r\n batch = list(zip(*batch))\r\n batch[0] = torch.stack(batch[0])\r\n batch[1] = list(batch[1])\r\n batch[2] = torch.stack(batch[2])\r\n return tuple(batch)", "def collate_fn(batch):\n metadata = []\n for el in batch:\n metadata.append(el[\"metadata\"])\n del el[\"metadata\"]\n\n batch = default_collate(batch)\n\n batch[\"metadata\"] = metadata\n\n return batch", "def collate_fn(batch):\r\n names, images, annos = zip(*batch)\r\n images = default_collate(images)\r\n return names, images, annos", "def collate_fn(batch):\r\n names, images, annos = zip(*batch)\r\n images = default_collate(images)\r\n return names, images, annos", "def collate_fn(data):\n # Sort by conversation length (descending order) to use 'pack_padded_sequence'\n data.sort(key=lambda x: x[1], reverse=True)\n\n # Separate\n sentences, conversation_length, sentence_length = zip(*data)\n\n # return sentences, conversation_length, sentence_length.tolist()\n return sentences, conversation_length, sentence_length", "def collate_fn(batch):\n\n flattened_batch = []\n for data in batch:\n num_examples = len(data['image'])\n for i in range(num_examples):\n flattened_batch.append({\n k: v[i] for k, v in data.items()\n })\n\n return default_collate(flattened_batch)", "def collate_fn(self, batch):\r\n batch = list(map(torch.stack, zip(*batch)))\r\n max_seq_len = torch.max(torch.sum(batch[1], 1)).item()\r\n for i in range(len(batch) - 1):\r\n if batch[i].size()[1] > max_seq_len:\r\n batch[i] = batch[i][:, :max_seq_len]\r\n if self.truncate_label:\r\n batch[-1] = batch[-1][:, :max_seq_len]\r\n return batch", "def customize_collate(batch):\n\n elem = batch[0]\n elem_type = type(elem)\n if isinstance(elem, torch.Tensor):\n # this is the main part to handle varied length data in a batch\n # batch = [data_tensor_1, data_tensor_2, data_tensor_3 ... 
]\n # \n batch_new = pad_sequence(batch)\n \n out = None\n if torch.utils.data.get_worker_info() is not None:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n\n # allocate the memory based on maximum numel\n numel = max([x.numel() for x in batch_new]) * len(batch_new)\n storage = elem.storage()._new_shared(numel)\n out = elem.new(storage)\n return torch.stack(batch_new, 0, out=out)\n\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':\n # array of string classes and object\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(customize_collate_err_msg.format(elem.dtype))\n # this will go to loop in the last case\n return customize_collate([torch.as_tensor(b) for b in batch])\n elif elem.shape == (): # scalars\n return torch.as_tensor(batch)\n \n elif isinstance(elem, float):\n return torch.tensor(batch, dtype=torch.float64)\n elif isinstance(elem, int_classes):\n return torch.tensor(batch)\n elif isinstance(elem, string_classes):\n return batch\n elif isinstance(elem, container_abcs.Mapping):\n return {key: customize_collate([d[key] for d in batch]) for key in elem}\n elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple\n return elem_type(*(customize_collate(samples) \\\n for samples in zip(*batch)))\n elif isinstance(elem, container_abcs.Sequence):\n # check to make sure that the elements in batch have consistent size\n it = iter(batch)\n elem_size = len(next(it))\n if not all(len(elem) == elem_size for elem in it):\n raise RuntimeError('each element in batch should be of equal size')\n \n # zip([[A, B, C], [a, b, c]]) -> [[A, a], [B, b], [C, c]]\n transposed = zip(*batch)\n return [customize_collate(samples) for samples in transposed]\n\n raise TypeError(customize_collate_err_msg.format(elem_type))", "def _collate_else(batch, collate_func):\n error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n elem_type = type(batch[0])\n if elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n elem = batch[0]\n if elem_type.__name__ == 'ndarray':\n # array of string classes and object\n if re.search('[SaUO]', elem.dtype.str) is not None:\n raise TypeError(error_msg.format(elem.dtype))\n\n return torch.stack([torch.from_numpy(b) for b in batch], 0)\n if elem.shape == (): # scalars\n py_type = float if elem.dtype.name.startswith('float') else int\n return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n elif isinstance(batch[0], slice):\n batch = default_collate([{\n 'start': sl.start,\n 'stop': sl.stop,\n 'step': 1 if sl.step is None else sl.step\n } for sl in batch])\n return batch\n elif isinstance(batch[0], int_classes):\n return torch.LongTensor(batch)\n elif isinstance(batch[0], float):\n return torch.DoubleTensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], container_abcs.Mapping):\n # Hack the mapping collation implementation to print error info\n if _DEBUG:\n collated = {}\n try:\n for key in batch[0]:\n collated[key] = collate_func([d[key] for d in batch])\n except Exception:\n print('\\n!!Error collating key = {!r}\\n'.format(key))\n raise\n return collated\n else:\n return {key: collate_func([d[key] for d in batch]) for key in batch[0]}\n elif isinstance(batch[0], tuple) and hasattr(batch[0], '_fields'): # 
namedtuple\n return type(batch[0])(*(default_collate(samples) for samples in zip(*batch)))\n elif isinstance(batch[0], container_abcs.Sequence):\n transposed = zip(*batch)\n return [collate_func(samples) for samples in transposed]\n else:\n raise TypeError((error_msg.format(type(batch[0]))))", "def collate_fn(data):\n\toutput = dict()\n\n\tfor name in ['answer_ID','query_ID']:\n\t\toutput[name] = [ _[name] for _ in data]\n\n\n\tfor name in ['query_len','answer_len']:\n\t\ttemp = [ _[name] for _ in data]\t \n\t\toutput[name] = torch.stack(temp, dim=0) \n\t\n\t#deal with source and target\n\tfor name in ['answer','query']:\n\t\tlength = output['{0}_len'.format(name)]\n\t\tl = length.max().item()\n\n\t\tfor i in range(len(data)):\n\t\t\tif(l-length[i].item()>0):\n\t\t\t\tdata[i][name] = torch.cat([data[i][name],torch.zeros(l-length[i].item(),dtype=torch.long)],dim=-1)\n\n\t\ttemp = [ _[name] for _ in data]\n\t\t\n\t\toutput[name] = torch.stack(temp, dim=0).long()\n\t\t\n\n\treturn output", "def collate_fn(self, image_column_names: Optional[List] = None, per_gpu_batch_size: Optional[int] = None) -> Dict:\n fn = {}\n if self.requires_column_info:\n return NotImplementedError(\n f\"requires_column_info={self.requires_column_info} not implemented for OVD tasks.\"\n )\n\n fn.update(\n {\n self.image_key: PadCollator(pad_val=0),\n self.prompt_key: ListCollator(),\n self.image_meta_key: ListCollator(),\n }\n )\n return fn", "def _collate_fn(batch):\n def _pad(seqs, dtype=torch.float32):\n \"\"\" Pads a batch of sequences of varying seq_len. \"\"\"\n assert len(seqs) > 0 and all(x.shape[1:] == seqs[0].shape[1:] for x in seqs)\n lens = torch.LongTensor([len(x) for x in seqs])\n max_seq_len = torch.max(lens)\n\n # padded_seq_dims: (batch, max_seq_len, ...).\n padded_seq_dims = (len(seqs), max_seq_len,) + seqs[0].shape[1:]\n res = torch.zeros(padded_seq_dims, dtype=dtype)\n for i, seq in enumerate(seqs):\n src_len = lens[i]\n res[i, :src_len] = torch.Tensor(seq)\n return res, lens\n\n assert all(len(x) == 2 for x in batch)\n # (1, batch, (seq_len, 68, 3))\n frames, captions = zip(*batch)\n\n # Merge sequences (from tuple of 1D tensor to 2D tensor)\n # (batch, seq_len, ...)\n src_seqs, src_lens = _pad(frames, dtype=torch.float32)\n tgt_seqs, tgt_lens = _pad(captions, dtype=torch.long)\n return src_seqs, src_lens, tgt_seqs, tgt_lens", "def _collate_fn(batch):\n # imgs = [b[0] for b in batch]\n # labels = [b[1] for b in batch]\n # imgs = torch.stack(imgs, dim=0)\n # return [imgs, labels]\n imgs = [b[0] for b in batch]\n labels = [b[1] for b in batch]\n imgs = torch.cat(imgs, dim=0)\n labels = [l for sublist in labels for l in sublist]\n return [imgs, labels]", "def mycollate(batch):\n elem_type = type(batch[0])\n if isinstance(batch[0], torch.Tensor):\n out = None\n if _use_shared_memory:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum([x.numel() for x in batch])\n storage = batch[0].storage()._new_shared(numel)\n out = batch[0].new(storage)\n return torch.stack(batch, 0, out=out)\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n elem = batch[0]\n if elem_type.__name__ == 'ndarray':\n # array of string classes and object\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(error_msg_fmt.format(elem.dtype))\n\n return default_collate([torch.from_numpy(b) for b in batch])\n if elem.shape == (): # scalars\n py_type = float if 
elem.dtype.name.startswith('float') else int\n return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n elif isinstance(batch[0], float):\n return torch.tensor(batch, dtype=torch.float64)\n elif isinstance(batch[0], int_classes):\n return torch.tensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], container_abcs.Mapping):\n if 'dataset' not in batch[0] or batch[0]['dataset'].neib_samp not in ('sampling', 'best', 'relation'):\n return {key: default_collate([d[key] for d in batch]) for key in batch[0] if key not in ['weight','impt','dataset']}\n relations = batch[0]['dataset'].tr_grp\n if batch[0]['dataset'].neib_samp == 'relation':\n nodes2 = sum([d['impt'] for d in batch],[])\n else:\n w= sum([d['weight'] for d in batch], Counter())\n [w.pop(d['index'], None) for d in batch] \n if batch[0]['dataset'].neib_samp == 'sampling':\n p = FlexCounter(w)/sum(w.values())\n nodes2 = np.random.choice(list(p.keys()), batch[0]['dataset'].k, replace=False, p=list(p.values()))\n elif batch[0]['dataset'].neib_samp == 'best':\n nodes2 = nlargest(batch[0]['dataset'].k, w, key = w.get) \n \n neib_batch = [batch[0]['dataset']._getimage(x,True,1) for x in nodes2]\n [(d.pop('weight', None), d.pop('dataset', None)) for d in batch]\n batch = neib_batch + batch\n coll = default_collate(batch)\n adj_mats = {r: np.zeros((len(batch), len(batch))) for r in relations}\n for r in relations:\n for i, b1 in enumerate(coll[r]):\n for j, b2 in enumerate(coll[r]):\n if i!=j:\n adj_mats[r][i,j] = 1 if b1==b2 else 0\n adj_mats[r] = adj_norm(adj_mats[r]) \n coll['adj'] = adj_mats\n coll['k'] = len(nodes2)\n return coll\n \n elif isinstance(batch[0], tuple) and hasattr(batch[0], '_fields'): # namedtuple\n return type(batch[0])(*(default_collate(samples) for samples in zip(*batch)))\n elif isinstance(batch[0], container_abcs.Sequence):\n transposed = zip(*batch)\n return [default_collate(samples) for samples in transposed]\n\n raise TypeError((error_msg_fmt.format(type(batch[0]))))", "def collate_fn(batch):\n pad_index = 1 # the <PAD> index in vocabulary\n src_list = [sample[0] for sample in batch] # list of each language sentences\n trg_list = [sample[1] for sample in batch]\n\n def padding(sentence_list):\n \"\"\"padding each sentence to the right\"\"\"\n max_len = max([sentence.size(0) for sentence in sentence_list])\n pad_sen = [sen.tolist() + [pad_index] * max(0, max_len - len(sen))\n for sen in sentence_list]\n return torch.LongTensor(pad_sen).transpose(0, 1) # shape of (T, B)\n\n return padding(src_list), padding(trg_list)", "def collate_batch(self) -> Dict[str, Any]:\n pass", "def collate_fn(data):\n # Sort a data list by caption length\n images, captions, cap_mask, vision_mask, labels, vision_labels = zip(*data)\n\n images = torch.stack(images, 0)\n labels = torch.stack(labels, 0)\n vision_labels = torch.stack(vision_labels, 0).long()\n targets = torch.stack(captions, 0).long()\n cap_mask = torch.stack(cap_mask,0).long()\n vision_mask = torch.stack(vision_mask,0).long()\n\n return images, targets, cap_mask, vision_mask, labels, vision_labels", "def collate_fn(data):\n\n # Sort a data list by tweet length (descending order).\n # data.sort(key=lambda x: len(x[1]), reverse=True)\n texts_, targets_, relations = zip(*data)\n\n # Merge captions (from tuple of 1D tensor to 2D tensor).\n lengths = [len(text) for text in texts_]\n texts = torch.zeros(len(texts_), max(lengths)).long()\n for i, text in enumerate(texts_):\n end = lengths[i]\n texts[i, :end] = text[:end]\n\n 
lengths_targets = [len(text) for text in targets_]\n targets = torch.zeros(len(targets_), max(lengths_targets)).long()\n for i, text in enumerate(targets_):\n end = lengths_targets[i]\n targets[i, :end] = text[:end]\n return targets, lengths, texts, torch.tensor(relations).view(-1)", "def get_collate_for_dataset(\n dataset: Union[Dataset, ConcatDataset], ensure_collate_fn_are_the_same: bool = True\n) -> Callable:\n collate_fn = default_collate\n\n if hasattr(dataset, \"get_collate_fn\"):\n return dataset.get_collate_fn()\n elif isinstance(dataset, ConcatDataset):\n collate_fns = [get_collate_for_dataset(ds) for ds in dataset.datasets]\n collate_fn = collate_fns[0]\n\n if ensure_collate_fn_are_the_same:\n for other_collate_fn in collate_fns[1:]:\n if type(other_collate_fn) != type(collate_fn):\n raise ValueError(\n f\"Detected ConcatDataset consist of datasets with different collate functions: {type(collate_fn)} and {type(other_collate_fn)}.\"\n )\n\n if isinstance(collate_fn, functools.partial):\n if not _partial_functions_equal(collate_fn, other_collate_fn):\n raise ValueError(\n f\"Detected ConcatDataset consist of datasets with different collate functions: {collate_fn} and {type(other_collate_fn)}.\"\n )\n elif collate_fn != other_collate_fn:\n raise ValueError(\n f\"Detected ConcatDataset consist of datasets with different collate functions: {collate_fn} and {other_collate_fn}.\"\n )\n\n collate_fn = collate_fns[0]\n\n return collate_fn", "def collate_fn(batch):\r\n transposed = zip(*batch)\r\n lbd = lambda batch:torch.cat([torch.from_numpy(b).long() for b in batch])\r\n return [lbd(samples) for samples in transposed]", "def pad_collate_fn(batch):\n length = [len(sentence) for sentence in batch]\n return pad_sequence([torch.LongTensor(s) for s in batch], batch_first=True), torch.LongTensor(length)", "def seq_collate_fn(batch):\n idx, seq, seq_lengths = zip(*batch)\n idx = torch.tensor(idx)\n seq = torch.stack(seq)\n seq_lengths = torch.tensor(seq_lengths)\n _, sorted_seq_length_indices = torch.sort(seq_lengths)\n sorted_seq_length_indices = sorted_seq_length_indices.flip(0)\n sorted_seq_lengths = seq_lengths[sorted_seq_length_indices]\n\n T_max = torch.max(seq_lengths)\n mini_batch = seq[sorted_seq_length_indices, 0:T_max, :]\n mini_batch_reversed = reverse_sequence(mini_batch, sorted_seq_lengths)\n mini_batch_mask = get_mini_batch_mask(mini_batch, sorted_seq_lengths)\n\n return mini_batch, mini_batch_reversed, mini_batch_mask, sorted_seq_lengths", "def collate_fn_bert(data):\n # sort a data list by caption length\n data.sort(key=lambda x: x[4].shape[1], reverse=True)\n zipped_data = list(zip(*data))\n whole_length_max = zipped_data[4][0].shape[1]\n # align_tensor = len(tokenized_caption) * len(whole_caption)\n images, captions, ids, img_ids, align_tensors = zipped_data\n images = torch.stack(images, 0)\n lengths = [len(cap) for cap in captions]\n length_max = max(lengths)\n lengths_whole = [align.shape[1] for align in align_tensors]\n targets = torch.zeros(len(captions), length_max).long()\n targets_aligns = torch.zeros(len(captions), length_max, whole_length_max).to(torch.float32)\n for i, tup in enumerate(zip(captions, align_tensors)):\n cap, align_tensor = tup\n end = len(cap)\n tokenized_l = align_tensor.shape[0]\n whole_l = align_tensor.shape[1]\n #import ipdb; ipdb.set_trace()\n targets[i, :end] = cap[:end]\n targets_aligns[i, :tokenized_l, :whole_l]\n return images, targets, lengths, ids, targets_aligns, lengths_whole", "def create_sentence_pairs_collate_fn(PAD_token, 
fixed_sequence_length=None):\n\n if fixed_sequence_length:\n logger.info(f\"Using fixed sequence lengths of {fixed_sequence_length} tokens.\")\n\n def collate_fn(indexed_sentence_pairs):\n # Why is the sort required?\n # ==> This is a CuDNN requirement\n # ==> https://discuss.pytorch.org/t/why-lengths-should-be-given-in-sorted-order-in-pack-padded-sequence/3540\n # ==> Apparently solved now?\n indexed_sentence_pairs.sort(key=lambda pair: len(pair[0]), reverse=True)\n\n input_batch, output_batch = [], []\n for pair in indexed_sentence_pairs:\n input_batch.append(pair[0])\n output_batch.append(pair[1])\n\n # ############# PROCESS INPUT BATCH #############\n input_lengths = torch.tensor([len(indexed_sentence) for indexed_sentence in input_batch], dtype=torch.short)\n # Batch dimension should be second (in order to partition over multiple GPUs)\n input_lengths = input_lengths.unsqueeze(0)\n\n if fixed_sequence_length:\n padded_input_batch = torch.ones(fixed_sequence_length, len(input_batch), dtype=torch.long) * PAD_token\n for idx, indexed_sentence in enumerate(input_batch):\n padded_input_batch[0:len(indexed_sentence), idx] = torch.LongTensor(indexed_sentence)\n else:\n padded_input_batch = zeroPadding(input_batch, PAD_token)\n padded_input_batch = torch.LongTensor(padded_input_batch)\n\n ################################################\n\n # ############# PROCESS OUTPUT BATCH ############\n if fixed_sequence_length:\n max_output_len = fixed_sequence_length\n padded_output_batch = torch.ones(fixed_sequence_length, len(output_batch), dtype=torch.long) * PAD_token\n for idx, indexed_sentence in enumerate(output_batch):\n padded_output_batch[0:len(indexed_sentence), idx] = torch.LongTensor(indexed_sentence)\n output_mask = padded_output_batch != PAD_token\n else:\n max_output_len = max([len(indexed_sentence) for indexed_sentence in output_batch])\n padded_output_batch = zeroPadding(output_batch, PAD_token)\n\n output_mask = binaryMatrix(padded_output_batch, PAD_token)\n output_mask = torch.BoolTensor(output_mask)\n\n padded_output_batch = torch.LongTensor(padded_output_batch)\n ################################################\n\n return padded_input_batch, input_lengths, padded_output_batch, output_mask, max_output_len\n\n return collate_fn", "def customize_collate_from_batch(batch):\n\n elem = batch[0]\n elem_type = type(elem)\n if isinstance(elem, torch.Tensor):\n batch_new = pad_sequence(batch) \n out = None\n if torch.utils.data.get_worker_info() is not None:\n numel = max([x.numel() for x in batch_new]) * len(batch_new)\n storage = elem.storage()._new_shared(numel)\n out = elem.new(storage)\n # here is the difference\n return torch.cat(batch_new, 0, out=out)\n\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(customize_collate_err_msg.format(elem.dtype))\n return customize_collate_from_batch(\n [torch.as_tensor(b) for b in batch])\n elif elem.shape == (): # scalars\n return torch.as_tensor(batch)\n elif isinstance(elem, float):\n return torch.tensor(batch, dtype=torch.float64)\n elif isinstance(elem, int_classes):\n return torch.tensor(batch)\n elif isinstance(elem, string_classes):\n return batch\n elif isinstance(elem, tuple):\n # concatenate two tuples\n tmp = elem\n for tmp_elem in batch[1:]:\n tmp += tmp_elem \n return tmp\n elif isinstance(elem, 
container_abcs.Sequence):\n it = iter(batch)\n elem_size = len(next(it))\n if not all(len(elem) == elem_size for elem in it):\n raise RuntimeError('each element in batch should be of equal size')\n transposed = zip(*batch)\n return [customize_collate_from_batch(samples) for samples in transposed]\n\n raise TypeError(customize_collate_err_msg.format(elem_type))", "def invalid_collate(batch):\n batch = list(filter(lambda x: x[0] is not None, batch))\n\n if len(batch) == 0:\n return batch\n\n return default_collate(batch)", "def list_data_collate(batch: Sequence):\n elem = batch[0]\n data = [i for k in batch for i in k] if isinstance(elem, list) else batch\n key = None\n try:\n if config.USE_META_DICT:\n data = pickle_operations(data) # bc 0.9.0\n if isinstance(elem, Mapping):\n ret = {}\n for k in elem:\n key = k\n data_for_batch = [d[key] for d in data]\n ret[key] = collate_meta_tensor(data_for_batch)\n else:\n ret = collate_meta_tensor(data)\n return ret\n except RuntimeError as re:\n re_str = str(re)\n if \"equal size\" in re_str:\n if key is not None:\n re_str += f\"\\nCollate error on the key '{key}' of dictionary data.\"\n re_str += (\n \"\\n\\nMONAI hint: if your transforms intentionally create images of different shapes, creating your \"\n + \"`DataLoader` with `collate_fn=pad_list_data_collate` might solve this problem (check its \"\n + \"documentation).\"\n )\n _ = dev_collate(data)\n raise RuntimeError(re_str) from re\n except TypeError as re:\n re_str = str(re)\n if \"numpy\" in re_str and \"Tensor\" in re_str:\n if key is not None:\n re_str += f\"\\nCollate error on the key '{key}' of dictionary data.\"\n re_str += (\n \"\\n\\nMONAI hint: if your transforms intentionally create mixtures of torch Tensor and numpy ndarray, \"\n + \"creating your `DataLoader` with `collate_fn=pad_list_data_collate` might solve this problem \"\n + \"(check its documentation).\"\n )\n _ = dev_collate(data)\n raise TypeError(re_str) from re", "def collate_fn(data, device=default_device):\n # batch.sort(key=lambda x: len(x[1]), reverse=True)\n has_mask_tensor = True if data[0][-1] is not None else False\n input_tensor, target_tensor, bs_tensor, db_tensor, mask_tensor = zip(*data)\n\n input_tensor, input_lengths = padSequence(input_tensor)\n target_tensor, target_lengths = padSequence(target_tensor)\n bs_tensor = torch.as_tensor(bs_tensor, dtype=torch.float, device=device)\n db_tensor = torch.as_tensor(db_tensor, dtype=torch.float, device=device)\n mask_tensor = torch.stack(mask_tensor).permute((1, 0, 2)) if has_mask_tensor else None\n # mask_tensor = torch.stack(mask_tensor).permute((1, 0, 2)) if mask_tensor[0] and mask_tensor[0] != [] else None\n\n # data = input_tensor, target_tensor, bs_tensor, db_tensor, mask_tensor\n # if torch.cuda.is_available():\n # data = [data[i].cuda() if isinstance(data[i], torch.Tensor) else data[i] for i in range(len(data))]\n return input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, mask_tensor # tensors [batch_size, *]", "def collate_fn(data):\n # sort a data list by caption length\n data.sort(key=lambda x: len(x[1]), reverse=True)\n zipped_data = list(zip(*data))\n # align_tensor = len(tokenized_caption) * len(whole_caption)\n images, captions, ids, img_ids, = zipped_data\n images = torch.stack(images, 0)\n targets = torch.zeros(len(captions), len(captions[0])).long()\n lengths = [len(cap) for cap in captions]\n for i, cap in enumerate(captions):\n end = len(cap)\n targets[i, :end] = cap[:end]\n return images, targets, lengths, ids", "def 
collate_fn(batch):\n all_input_ids, all_attention_mask, all_token_type_ids, all_lens, all_labels = map(torch.stack, zip(*batch))\n max_len = max(all_lens).item()\n all_input_ids = all_input_ids[:, :max_len]\n all_attention_mask = all_attention_mask[:, :max_len]\n all_token_type_ids = all_token_type_ids[:, :max_len]\n return all_input_ids, all_attention_mask, all_token_type_ids, all_labels", "def dev_collate(batch, level: int = 1, logger_name: str = \"dev_collate\"):\n elem = batch[0]\n elem_type = type(elem)\n l_str = \">\" * level\n batch_str = f\"{batch[:10]}{' ... ' if len(batch) > 10 else ''}\"\n if isinstance(elem, torch.Tensor):\n try:\n logging.getLogger(logger_name).critical(f\"{l_str} collate/stack a list of tensors\")\n return torch.stack(batch, 0)\n except TypeError as e:\n logging.getLogger(logger_name).critical(\n f\"{l_str} E: {e}, type {[type(elem).__name__ for elem in batch]} in collate({batch_str})\"\n )\n return\n except RuntimeError as e:\n logging.getLogger(logger_name).critical(\n f\"{l_str} E: {e}, shape {[elem.shape for elem in batch]} in collate({batch_str})\"\n )\n return\n elif elem_type.__module__ == \"numpy\" and elem_type.__name__ != \"str_\" and elem_type.__name__ != \"string_\":\n if elem_type.__name__ in [\"ndarray\", \"memmap\"]:\n logging.getLogger(logger_name).critical(f\"{l_str} collate/stack a list of numpy arrays\")\n return dev_collate([torch.as_tensor(b) for b in batch], level=level, logger_name=logger_name)\n elif elem.shape == (): # scalars\n return batch\n elif isinstance(elem, (float, int, str, bytes)):\n return batch\n elif isinstance(elem, abc.Mapping):\n out = {}\n for key in elem:\n logging.getLogger(logger_name).critical(f'{l_str} collate dict key \"{key}\" out of {len(elem)} keys')\n out[key] = dev_collate([d[key] for d in batch], level=level + 1, logger_name=logger_name)\n return out\n elif isinstance(elem, abc.Sequence):\n it = iter(batch)\n els = list(it)\n try:\n sizes = [len(elem) for elem in els] # may not have `len`\n except TypeError:\n types = [type(elem).__name__ for elem in els]\n logging.getLogger(logger_name).critical(f\"{l_str} E: type {types} in collate({batch_str})\")\n return\n logging.getLogger(logger_name).critical(f\"{l_str} collate list of sizes: {sizes}.\")\n if any(s != sizes[0] for s in sizes):\n logging.getLogger(logger_name).critical(\n f\"{l_str} collate list inconsistent sizes, got size: {sizes}, in collate({batch_str})\"\n )\n transposed = zip(*batch)\n return [dev_collate(samples, level=level + 1, logger_name=logger_name) for samples in transposed]\n logging.getLogger(logger_name).critical(f\"{l_str} E: unsupported type in collate {batch_str}.\")\n return", "def _collater(batch):\n return batch[0]", "def collater(self, samples):\r\n return collate(\r\n samples, self.src_dict, self.tgt_dict,\r\n left_pad_source=self.left_pad_source, left_pad_target=self.left_pad_target,\r\n max_sent_len=self.max_sent_len,\r\n mask_other_sents=self.mask_other_sents\r\n )", "def list_data_collate(batch):\n elem = batch[0]\n data = [i for k in batch for i in k] if isinstance(elem, list) else batch\n return default_collate(data)", "def collate_fn(data):\n # Sort a data list by caption length\n data.sort(key=lambda x: len(x[1]), reverse=True)\n images, captions, bboxes, depends, ids, img_ids = zip(*data)\n\n # Merge images (convert tuple of 3D tensor to 4D tensor)\n images = torch.stack(images, 0)\n bboxes = torch.stack(bboxes, 0)\n\n # Merget captions (convert tuple of 1D tensor to 2D tensor)\n lengths = [len(cap) for cap in 
captions]\n targets = torch.zeros(len(captions), max(lengths)).long()\n for i, cap in enumerate(captions):\n end = lengths[i]\n targets[i, :end] = cap[:end]\n\n return images, targets, bboxes, depends, lengths, ids", "def collate_fn(data):\r\n\r\n # sort data by caption length\r\n data.sort(key=lambda x: len(x[1]), reverse=True)\r\n images, captions = zip(*data)\r\n\r\n # Merge image tensors (stack)\r\n images = torch.stack(images, 0)\r\n\r\n # Merge captions\r\n caption_lengths = [len(caption) for caption in captions]\r\n\r\n # zero-matrix num_captions x caption_max_length\r\n padded_captions = torch.zeros(len(captions), max(caption_lengths)).long()\r\n\r\n # fill the zero-matrix with captions. the remaining zeros are padding\r\n for idx, caption in enumerate(captions):\r\n end = caption_lengths[idx]\r\n padded_captions[idx, :end] = caption[:end]\r\n return images, padded_captions, caption_lengths", "def collate_fn(data):\r\n # Sort a data list by caption length\r\n data.sort(key=lambda x: len(x[1]), reverse=True)\r\n\r\n images, captions, ids, img_ids = zip(*data)\r\n\r\n # Merge images (convert tuple of 3D tensor to 4D tensor)\r\n images = torch.stack(images, 0)\r\n\r\n # Merget captions (convert tuple of 1D tensor to 2D tensor)\r\n lengths = torch.LongTensor([len(cap) for cap in captions])\r\n targets = torch.zeros(len(captions), max(lengths)).long()\r\n for i, cap in enumerate(captions):\r\n end = lengths[i]\r\n targets[i, :end] = cap[:end]\r\n\r\n return images, targets, lengths, ids", "def individual_collate(batch):\n\n data = batch\n\n collected_data = defaultdict(list)\n\n for i in range(len(list(data))):\n for k in data[i].keys():\n collected_data[k].append(data[i][k])\n\n for k in collected_data.keys():\n collected_data[k] = torch.stack(collected_data[k])\n\n return collected_data", "def custom_collate_fn(batch):\n images, bboxes, context_indices, labels = zip(*batch)\n # images = (img_1, ..., img_N) each element of size [3, img_H, img_W]\n # bboxes = (bboxes_1, ..., bboxes_N) each element of size [n_bboxes_in_image, 4]\n # context_indices = (ci_1, ..., ci_N) each element of size [n_bboxes_in_image, 2*context_size]\n # labels = (labels_1, ..., labels_N) each element of size [n_bboxes_in_image]\n \n images = torch.stack(images, 0)\n \n bboxes_with_batch_index = []\n observed_bboxes = 0\n for i, bbox in enumerate(bboxes):\n batch_indices = torch.Tensor([i]*bbox.shape[0]).view(-1,1)\n bboxes_with_batch_index.append(torch.cat((batch_indices, bbox), dim=1))\n context_indices[i][context_indices[i] != -1] += observed_bboxes\n observed_bboxes += bbox.shape[0]\n bboxes_with_batch_index = torch.cat(bboxes_with_batch_index)\n context_indices = torch.cat(context_indices)\n \n labels = torch.cat(labels)\n \n return images, bboxes_with_batch_index, context_indices, labels", "def test_custom_collate() -> None:\n metadata = PatientMetadata(patient_id='42')\n foo = \"foo\"\n d1 = {foo: 1, SAMPLE_METADATA_FIELD: \"something\"}\n d2 = {foo: 2, SAMPLE_METADATA_FIELD: metadata}\n result = collate_with_metadata([d1, d2])\n assert foo in result\n assert SAMPLE_METADATA_FIELD in result\n assert isinstance(result[SAMPLE_METADATA_FIELD], list)\n assert result[SAMPLE_METADATA_FIELD] == [\"something\", metadata]\n assert isinstance(result[foo], torch.Tensor)\n assert result[foo].tolist() == [1, 2]", "def _var_len_collate_fn(batch):\n def func(p):\n return p[0].size(1)\n\n longest_sample = max(batch, key=func)[0]\n freq_size = longest_sample.size(2)\n minibatch_size = len(batch)\n max_seqlength = 
longest_sample.size(1)\n inputs = torch.zeros(minibatch_size, 1, max_seqlength, freq_size)\n targets = []\n seq_lengths = []\n for x in range(minibatch_size):\n sample = batch[x]\n tensor = sample[0]\n target = sample[1]\n seq_length = tensor.size(1)\n seq_lengths.append(seq_length)\n inputs[x].narrow(1, 0, seq_length).copy_(tensor)\n targets.append(target)\n targets = torch.LongTensor(targets)\n return seq_lengths, inputs, targets", "def collate_fn(batch):\n sentence1 = [item[0] for item in batch]\n sentence2 = [item[1] for item in batch]\n label = [item[2] for item in batch]\n label = torch.tensor(label)\n return sentence1, sentence2, label", "def get_collate_fn(mixer_name: str, alpha: float) -> Callable:\n fn = cutmix if mixer_name == \"cutmix\" else mixup\n collate_fn = CustomCollate(alpha=alpha, mixer=fn)\n return collate_fn", "def default_collate(batch):\n elem_type = type(batch[0])\n if isinstance(batch[0], torch.Tensor):\n out = None\n if _use_shared_memory:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum([x.numel() for x in batch])\n storage = batch[0].storage()._new_shared(numel)\n out = batch[0].new(storage)\n return torch.stack(batch, 0, out=out)\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n elem = batch[0]\n if elem_type.__name__ == 'ndarray':\n # array of string classes and object\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(error_msg_fmt.format(elem.dtype))\n\n return default_collate([torch.from_numpy(b) for b in batch])\n if elem.shape == (): # scalars\n py_type = float if elem.dtype.name.startswith('float') else int\n return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n elif isinstance(batch[0], float):\n return torch.tensor(batch, dtype=torch.float64)\n elif isinstance(batch[0], int_classes):\n return torch.tensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], container_abcs.Mapping): \n return {key: default_collate([d[key] for d in batch]) for key in batch[0]}\n elif isinstance(batch[0], tuple) and hasattr(batch[0], '_fields'): # namedtuple\n return type(batch[0])(*(default_collate(samples) for samples in zip(*batch)))\n elif isinstance(batch[0], container_abcs.Sequence):\n transposed = zip(*batch)\n return [default_collate(samples) for samples in transposed]\n\n raise TypeError((error_msg_fmt.format(type(batch[0]))))", "def collate_fn(batch):\n text = [item[0] for item in batch]\n audio = [item[1] for item in batch]\n\n text_lengths = [len(x) for x in text]\n audio_lengths = [len(x) for x in audio]\n\n max_text = max(text_lengths)\n max_audio = max(audio_lengths)\n\n text_batch = np.stack(pad_text(x, max_text) for x in text)\n audio_batch = np.stack(pad_spectrogram(x, max_audio) for x in audio)\n\n return (torch.LongTensor(text_batch),\n torch.FloatTensor(audio_batch).permute(1, 0, 2),\n text_lengths, audio_lengths)", "def _default_collate(batch):\n\n elem = batch[0]\n elem_type = type(elem)\n if isinstance(elem, pt.Tensor):\n out = None\n if pt.utils.data.get_worker_info() is not None:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum([x.numel() for x in batch])\n storage = elem.storage()._new_shared(numel)\n out = elem.new(storage)\n return pt.stack(batch, 0, out=out)\n elif (\n elem_type.__module__ == \"numpy\"\n and elem_type.__name__ != 
\"str_\"\n and elem_type.__name__ != \"string_\"\n ):\n elem = batch[0]\n if elem_type.__name__ == \"ndarray\":\n # array of string classes and object\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(\n _default_collate_err_msg_format.format(elem.dtype)\n )\n return _default_collate([pt.as_tensor(b) for b in batch])\n elif elem.shape == (): # scalars\n return pt.as_tensor(batch)\n elif isinstance(elem, float):\n return pt.tensor(batch, dtype=pt.float)\n elif isinstance(elem, int_classes):\n return pt.tensor(batch, dtype=pt.long)\n elif isinstance(elem, string_classes):\n return batch\n elif isinstance(elem, container_abcs.Mapping):\n return {key: _default_collate([d[key] for d in batch]) for key in elem}\n elif isinstance(elem, tuple) and hasattr(elem, \"_fields\"): # namedtuple\n return elem_type(\n *(_default_collate(samples) for samples in zip(*batch))\n )\n elif isinstance(elem, container_abcs.Sequence):\n transposed = zip(*batch)\n return [_default_collate(samples) for samples in transposed]\n elif elem is None:\n return None\n\n raise TypeError(_default_collate_err_msg_format.format(elem_type))", "def collate_fn(self, batch):\n # Sort a data list by caption length (descending order).\n #sample.sort(key=lambda x: len(x[1]), reverse=True)\n images, words = [b.get('image') for b in batch], [b.get('word') for b in batch]\n \n # Merge images (from tuple of 3D tensor to 4D tensor).\n images = torch.stack(images, 0)\n \n # Merge captions (from tuple of 1D tensor to 2D tensor).\n lengths = [len(word) for word in words]\n targets = torch.zeros(sum(lengths)).long()\n lengths = torch.tensor(lengths)\n for j, word in enumerate(words):\n start = sum(lengths[:j])\n end = lengths[j]\n targets[start:start+end] = torch.tensor([self.ds.char_dict.get(letter) for letter in word]).long()\n \n if self.device == 'cpu':\n dev = torch.device('cpu')\n else:\n dev = torch.device('cuda')\n return images.to(dev), targets.to(dev), lengths.to(dev)", "def mapfn(k, v):\n for row in v:\n # completar\n pass", "def test_collation_register_twice(self):\n con = sqlite.connect(\":memory:\")\n con.create_collation(\"mycoll\", lambda x, y: (x > y) - (x < y))\n con.create_collation(\"mycoll\", lambda x, y: -((x > y) - (x < y)))\n result = con.execute(\"\"\"\n select x from (select 'a' as x union select 'b' as x) order by x collate mycoll\n \"\"\").fetchall()\n self.assertEqual(result[0][0], 'b')\n self.assertEqual(result[1][0], 'a')", "def collate_fn(data: list):\n def pad_tensor(inp):\n assert type(inp[0]) == torch.Tensor\n it = iter(inp)\n t = next(it)\n max_shape = list(t.shape)\n while True:\n try:\n t = next(it)\n for i in range(len(max_shape)):\n max_shape[i] = int(max(max_shape[i], t.shape[i]))\n except StopIteration:\n break\n max_shape = np.array(max_shape)\n\n padded_ts = []\n for t in inp:\n pad_pattern = np.zeros(2 * len(max_shape), dtype=np.int64)\n pad_pattern[::-2] = max_shape - np.array(t.shape)\n pad_pattern = tuple(pad_pattern.tolist())\n padded_ts.append(F.pad(t, pad_pattern, 'constant', 0))\n\n return padded_ts\n\n def stack(inp):\n if type(inp[0]) == list:\n ret = []\n for vs in zip(*inp):\n ret.append(stack(vs))\n elif type(inp[0]) == dict:\n ret = {}\n for kvs in zip(*[x.items() for x in inp]):\n ks, vs = zip(*kvs)\n for k in ks:\n assert k == ks[0], \"Key value mismatch.\"\n ret[k] = stack(vs)\n elif type(inp[0]) == torch.Tensor:\n new_t = pad_tensor(inp)\n ret = torch.stack(new_t, 0)\n elif type(inp[0]) == np.ndarray:\n new_t = pad_tensor([torch.from_numpy(x) for x 
in inp])\n ret = torch.stack(new_t, 0)\n elif type(inp[0]) == str:\n ret = inp\n else:\n raise ValueError('Cannot handle type {}'.format(type(inp[0])))\n return ret\n\n ret = stack(data)\n\n # compute CPU-intensive matrix K1, K2 here to leverage multi-processing nature of dataloader\n # if 'Gs' in ret and 'Hs' in ret and :\n # try:\n # G1_gt, G2_gt = ret['Gs']\n # H1_gt, H2_gt = ret['Hs']\n # sparse_dtype = np.float32\n # K1G = [kronecker_sparse(x, y).astype(sparse_dtype) for x, y in zip(G2_gt, G1_gt)] # 1 as source graph, 2 as target graph\n # K1H = [kronecker_sparse(x, y).astype(sparse_dtype) for x, y in zip(H2_gt, H1_gt)]\n # K1G = CSRMatrix3d(K1G)\n # K1H = CSRMatrix3d(K1H).transpose()\n #\n # ret['Ks'] = K1G, K1H #, K1G.transpose(keep_type=True), K1H.transpose(keep_type=True)\n # except ValueError:\n # pass\n\n return ret", "def collater(self, samples):\n\n return dual_collate(\n samples, pad_idx=self.d1.src_dict.pad(), eos_idx=self.d1.src_dict.eos(),\n left_pad_source=self.d1.left_pad_source, left_pad_target=self.d1.left_pad_target,\n input_feeding=self.d1.input_feeding,\n )\n\n #prev_output_tokens doesn't match!\n #id doesn't match\n #both of these keys are lengths 248 for both dictionaries\n #length only captures the first dimension of a multidimensional tensor\n #248 is likely the batch size here\n #error occurs because of the sorting by descending source length in the collate method\n #may be possible to fix by replace the sort_order line with: sort_order = torch.LongTensor(range(len(id)))\n #also it seems like there's more keys in c1 and c2 than we explicitly account for here \n #also fix DualSourceSequenceGenerator.generate\n\n indexes = [sample['id'] for sample in samples]\n\n c1 = self.d1.collater([self.d1[index] for index in indexes])\n c2 = self.d2.collater([self.d2[index] for index in indexes])\n\n # c1 = self.d1.collater([self.d1[sample['id']] for sample in samples])\n # c2 = self.d2.collater([self.d2[sample['id']] for sample in samples])\n\n net_input1 = c1['net_input']; net_input2 = c2['net_input']\n net_input = {}\n for key in net_input1.keys():\n if 'src_' in key:\n net_input[key+'1'] = net_input1[key]\n elif key == 'prev_output_tokens':\n net_input[key] = net_input1[key]\n # elif key == 'ntokens':\n # net_input[key] = net_input1[key]\n else:\n raise AssertionError\n for key in net_input2.keys():\n if 'src_' in key:\n net_input[key+'2'] = net_input2[key]\n elif key == 'prev_output_tokens':\n if self.dual_decoder:\n net_input[key+'_extra'] = net_input2[key]\n else:\n # net_input[key] = net_input2[key]\n pass\n # err = \"NET_INPUT ASSERTION: \"+str(len(indexes))+\";\\n\"\n # err += str(len(net_input[key])) + \"\\t\" + str(net_input[key]) + \"\\n\"\n # err += str(len(net_input2[key])) + \"\\t\" + str(net_input2[key]) + \"\\n\"\n # assert False, err\n # if not net_input[key] == net_input2[key]:\n # print(\"NET_INPUT ASSERTION:\")\n # print(net_input[key])\n # print(net_input2[key])\n # raise AssertionError\n else:\n raise AssertionError\n\n c = {'net_input': net_input}\n for key in c1.keys():\n if key == 'target':\n c[key] = c1[key]\n elif key == 'ntokens':\n c[key] = c1[key]\n elif key == 'id' or key == 'nsentences':\n c[key] = c1[key]\n else:\n assert key == 'net_input',key\n for key in c2.keys():\n if key == 'target':\n c[key] = c2[key]\n elif key == 'ntokens':\n if 'target' not in samples[0]:\n c[key] += c2[key] # source tokens\n elif self.dual_decoder:\n c[key+'_extra'] = c2[key] # target tokens for decoder 2\n else:\n assert c[key] == c2[key], 
\"NTOKENS:\\n\"+str(c[key])+\"\\n\"+str(c2[key]) # target tokens for decoder\n elif key == 'id':\n # set1 = set(c[key])\n # set2 = set(c2[key])\n # assert set1 == set2\n assert False, \"ID: lengths: \"+str(len(indexes))+\"; \"+str(len(c[key]))+\", \"+str(len(c2[key]))+\"\\n\"+str(c[key][:10])+\"...\\n\"+str(c2[key][:10])+\"...\\n\" \n assert c[key] == c2[key], \"ID:\\n\"+str(c[key])+\"\\n\"+str(c2[key])\n elif key == 'nsentences':\n assert c[key] == c2[key], \"NSENT:\\n\"+str(c[key])+\"\\n\"+str(c2[key])\n else:\n assert key == 'net_input',key\n return c\n\n\n\n net_input1['src_tokens1'] = net_input1.pop('src_tokens') \n net_input1['src_lengths1'] = net_input1.pop('src_lengths')\n net_input1['src_tokens2'] = net_input2['src_tokens'] \n net_input1['src_lengths2'] = net_input2['src_lengths']\n\n if self.dual_decoder:\n net_input1['prev_output_tokens_extra'] = net_input2['prev_output_tokens']\n c1['target_extra'] = c2['target']\n c1['ntokens_extra'] = c2['ntokens']\n if 'target' not in samples[0]:\n #ntokens and ntokens_extra represent the total number of source tokens\n c1['ntokens'] = c1['ntokens'] + c2['ntokens']\n if 'ntokens_extra' in c1:\n c1['ntokens_extra'] = c1['ntokens']\n #else ntokens is the total number of target tokens\n return c1", "def __init__(self, *args, **kwargs):\n super(AudioDataLoader, self).__init__(*args, **kwargs)\n self.collate_fn = _collate_fn", "def collate_frame_gru_fn(data):\n # Sort a data list by caption length\n if data[0][1] is not None:\n data.sort(key=lambda x: len(x[1]), reverse=True)\n videos, captions, cap_bows, tokens_tensor, segments_tensors, caption_text = zip(*data)\n\n # Merge videos (convert tuple of 1D tensor to 4D tensor)\n video_lengths = [min(VIDEO_MAX_LEN, len(frame)) for frame in videos]\n frame_vec_len = len(videos[0][0])\n vidoes = torch.zeros(len(videos), max(video_lengths), frame_vec_len)\n videos_origin = torch.zeros(len(videos), frame_vec_len)\n vidoes_mask = torch.zeros(len(videos), max(video_lengths))\n for i, frames in enumerate(videos):\n end = video_lengths[i]\n vidoes[i, :end, :] = frames[:end, :]\n videos_origin[i, :] = torch.mean(frames, 0)\n vidoes_mask[i, :end] = 1.0\n\n if captions[0] is not None:\n # Merge captions (convert tuple of 1D tensor to 2D tensor)\n lengths = [len(cap) for cap in captions]\n target = torch.zeros(len(captions), max(lengths)).long()\n words_mask = torch.zeros(len(captions), max(lengths))\n for i, cap in enumerate(captions):\n end = lengths[i]\n target[i, :end] = cap[:end]\n words_mask[i, :end] = 1.0\n else:\n target = None\n lengths = None\n words_mask = None\n\n # 'BERT Process'\n if captions[0] is not None:\n # Merge captions (convert tuple of 1D tensor to 2D tensor)\n lengths_bert = [len(seg) for seg in segments_tensors]\n tokens_tensor_padded = torch.zeros(len(tokens_tensor), max(lengths_bert)).long()\n segments_tensors_padded = torch.zeros(len(segments_tensors), max(lengths_bert)).long()\n words_mask_bert = torch.zeros(len(tokens_tensor), max(lengths_bert))\n\n for i, cap in enumerate(tokens_tensor):\n end = lengths_bert[i]\n tokens_tensor_padded[i, :end] = cap[:end]\n words_mask_bert[i, :end] = 1.0\n for i, cap in enumerate(segments_tensors):\n end = lengths_bert[i]\n segments_tensors_padded[i, :end] = cap[:end]\n\n\n else:\n lengths_bert = None\n tokens_tensor_padded = None\n segments_tensors_padded = None\n words_mask_bert = None\n\n cap_bows = torch.stack(cap_bows, 0) if cap_bows[0] is not None else None\n\n video_data = (vidoes, videos_origin, video_lengths, vidoes_mask)\n text_data = 
(target, cap_bows, lengths, words_mask, tokens_tensor_padded, segments_tensors_padded, lengths_bert)\n\n return video_data, text_data", "def trivial_batch_collator(batch):\n return batch", "def trivial_batch_collator(batch):\n return batch", "def trivial_batch_collator(batch):\n return batch", "def batch_collate_fn(batch):\n images = []\n masks = []\n \n for (image, trimap, mask) in batch:\n mask = mask.unsqueeze(0)\n trimap = trimap.unsqueeze(0)\n image = torch.cat([image, trimap], 0).unsqueeze(0)\n \n images.append(image)\n masks.append(mask)\n\n images = torch.cat(images, 0)\n masks = torch.cat(masks, 0)\n\n return (images, masks)", "def fn_s(fn):\r\n c.execute(\"SELECT * FROM personnel WHERE first=:first COLLATE NOCASE\", {'first': fn})\r\n return c.fetchall()", "def custom_collate(batch):\n error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n elem_type = type(batch[0])\n if isinstance(batch[0], torch.Tensor):\n out = None\n matched = True\n for dim in range(batch[0].dim()):\n lst = list(map(lambda x: x.size(dim), batch))\n matched = not lst or lst.count(lst[0]) == len(lst)\n if not matched:\n break\n if matched:\n return torch.stack(batch, 0, out=out)\n else:\n return pad_sequence(batch, batch_first=True)\n # indices, items = zip(*sorted(enumerate(batch), key=lambda x: x[1].size(0), reverse=True))\n # lengths = [batch[i].size(0) for i in indices]\n # logger.info(lengths)\n # return pad_sequence([batch[i] for i in indices], batch_first=True), lengths\n elif isinstance(batch[0], np.ndarray):\n matched = True\n for dim in range(batch[0].ndim):\n lst = list(map(lambda x: x.shape[dim], batch))\n matched = not lst or lst.count(lst[0]) == len(lst)\n if not matched:\n break\n if matched:\n return np.stack(batch, 0)\n else:\n raise ValueError('dimensions are not matched {}'.format(batch[0].shape))\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n raise ValueError('cannot handle numpy data')\n elif isinstance(batch[0], int_classes):\n return torch.LongTensor(batch)\n elif isinstance(batch[0], float):\n return torch.DoubleTensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], collections.abc.Mapping):\n return {key: custom_collate([d[key] for d in batch]) for key in batch[0]}\n elif isinstance(batch[0], collections.abc.Sequence):\n transposed = zip(*batch)\n return [custom_collate(samples) for samples in transposed]\n raise TypeError((error_msg.format(type(batch[0]))))", "def collate_fn(data):\n images, idxs, captions = zip(*data)\n # Merge images (from tuple of 3D tensor to 4D tensor).\n images = torch.stack(images, 0)\n batch_size = images.shape[0]\n # p\n nums = []\n for idx in idxs:\n num = [0] * num_concept\n for id in idx:\n num[id[1]] = 1\n nums.append(num)\n concepts = torch.FloatTensor(nums)\n lengths = [len(cap) for cap in captions]\n targets = torch.zeros(len(captions), max(lengths)).long()\n for i, cap in enumerate(captions):\n end = lengths[i]\n targets[i, :end] = cap[:end]\n return images,concepts, targets", "def preMerge(self, t):\n\t\tif t.table.LookupList:\n\t\t\tlookupMap = {i:id(v) for i,v in enumerate(t.table.LookupList.Lookup)}\n\t\t\tt.table.LookupList.mapLookups(lookupMap)\n\t\t\tif t.table.FeatureList:\n\t\t\t\t# XXX Handle present FeatureList but absent LookupList\n\t\t\t\tt.table.FeatureList.mapLookups(lookupMap)\n\n\t\tif t.table.FeatureList and t.table.ScriptList:\n\t\t\tfeatureMap = {i:id(v) for i,v in 
enumerate(t.table.FeatureList.FeatureRecord)}\n\t\t\tt.table.ScriptList.mapFeatures(featureMap)", "def process_and_merge(s):\n l = [preprocessing(df) for df in s]\n d = {x.name: x for x in l}\n df = pd.DataFrame(d)\n df.index.names = [x.lower() for x in df.index.names]\n return pd.DataFrame(d)", "def vg_collate_fn(batch):\n # batch is a list, and each element is (image, objs, boxes, triples)\n all_imgs, all_objs, all_boxes, all_masks, all_obj_to_img = [], [], [], [], []\n # obj_offset = 0\n for i, (img, objs, boxes, masks) in enumerate(batch):\n all_imgs.append(img[None])\n O = objs.size(0)\n all_objs.append(objs)\n all_boxes.append(boxes)\n all_masks.append(masks)\n\n all_obj_to_img.append(torch.LongTensor(O).fill_(i))\n # obj_offset += O\n\n all_imgs = torch.cat(all_imgs)\n all_objs = torch.cat(all_objs)\n all_boxes = torch.cat(all_boxes)\n all_masks = torch.cat(all_masks)\n all_obj_to_img = torch.cat(all_obj_to_img)\n\n out = (all_imgs, all_objs, all_boxes, all_masks, all_obj_to_img)\n return out", "def apply_fn(self,fn):\r\n \r\n self.check_Data()\r\n for split,data_ in self.processed_data.items():\r\n x = data_['x']\r\n x = np.array([fn(xi) for xi in x])\r\n data_['x'] = x", "def transform():", "def prediction_collate(cls, batch):\n return default_prediction_collate(batch)", "def container_collate(inbatch, num_devices=None):\n\n if not isinstance(inbatch, collections.Sequence):\n raise TypeError(\"{} is not supported.\".format(inbatch.dtype))\n item0 = inbatch[0]\n if isinstance(item0, ItemContainer):\n return item0.__class__._collate(inbatch, num_devices=num_devices)\n elif isinstance(item0, collections.Sequence):\n transposed = zip(*inbatch)\n return [container_collate(samples,\n num_devices=num_devices)\n for samples in transposed]\n elif isinstance(item0, collections.Mapping):\n return {\n key: container_collate([d[key] for d in inbatch],\n num_devices=num_devices)\n for key in item0\n }\n else:\n return default_collate(inbatch)\n # return _collate_else(inbatch, container_collate)", "def dynamic_padding_collate_fn(batch_list):\n batch_uncollated = [[] for i in range(3)]\n\n for features in batch_list:\n length = features[1].sum().item()\n for i, feature in enumerate(features):\n batch_uncollated[i].append(feature[:length])\n\n batch_collated = []\n for batch in batch_uncollated:\n batch_collated.append(pad_sequence(batch, batch_first=True))\n\n return batch_collated", "def collater(self, samples):\r\n raise NotImplementedError", "def _msdd_train_collate_fn(self, batch):\n packed_batch = list(zip(*batch))\n features, feature_length, ms_seg_timestamps, ms_seg_counts, clus_label_index, scale_mapping, targets = packed_batch\n features_list, feature_length_list = [], []\n ms_seg_timestamps_list, ms_seg_counts_list, scale_clus_label_list, scale_mapping_list, targets_list = (\n [],\n [],\n [],\n [],\n [],\n )\n\n max_raw_feat_len = max([x.shape[0] for x in features])\n max_target_len = max([x.shape[0] for x in targets])\n max_total_seg_len = max([x.shape[0] for x in clus_label_index])\n\n for feat, feat_len, ms_seg_ts, ms_seg_ct, scale_clus, scl_map, tgt in batch:\n seq_len = tgt.shape[0]\n pad_feat = (0, max_raw_feat_len - feat_len)\n pad_tgt = (0, 0, 0, max_target_len - seq_len)\n pad_sm = (0, max_target_len - seq_len)\n pad_ts = (0, 0, 0, max_target_len - seq_len)\n pad_sc = (0, max_total_seg_len - scale_clus.shape[0])\n padded_feat = torch.nn.functional.pad(feat, pad_feat)\n padded_tgt = torch.nn.functional.pad(tgt, pad_tgt)\n padded_sm = torch.nn.functional.pad(scl_map, pad_sm)\n 
padded_ms_seg_ts = torch.nn.functional.pad(ms_seg_ts, pad_ts)\n padded_scale_clus = torch.nn.functional.pad(scale_clus, pad_sc)\n\n features_list.append(padded_feat)\n feature_length_list.append(feat_len.clone().detach())\n ms_seg_timestamps_list.append(padded_ms_seg_ts)\n ms_seg_counts_list.append(ms_seg_ct.clone().detach())\n scale_clus_label_list.append(padded_scale_clus)\n scale_mapping_list.append(padded_sm)\n targets_list.append(padded_tgt)\n\n features = torch.stack(features_list)\n feature_length = torch.stack(feature_length_list)\n ms_seg_timestamps = torch.stack(ms_seg_timestamps_list)\n clus_label_index = torch.stack(scale_clus_label_list)\n ms_seg_counts = torch.stack(ms_seg_counts_list)\n scale_mapping = torch.stack(scale_mapping_list)\n targets = torch.stack(targets_list)\n return features, feature_length, ms_seg_timestamps, ms_seg_counts, clus_label_index, scale_mapping, targets", "def _apply_transform(self):\n pass", "def get_transform_fn():", "def test_call(self):\n expected = self.df.columns\n actual = self.normalizer()(\n self.df, **self.kwargs).columns\n\n expected = sorted(expected)\n actual = sorted(actual)\n self.assertListEqual(actual, expected)", "def basic_collate(batch):\n\n minibatch, targets = zip(*[(a, b) for (a,b) in batch])\n minibatch = stack(minibatch, dim=0)\n return minibatch, targets", "def caption_collate_fn(data):\n # Sort a data list by caption length from longest to shortest.\n data.sort(key=lambda x: len(x[1]), reverse=True)\n images, captions = zip(*data)\n\n # merge images (from tuple of 3D tensor to 4D tensor).\n # if using features, 2D tensor to 3D tensor. (batch_size, 256)\n images = torch.stack(images, 0)\n\n # merge captions (from tuple of 1D tensor to 2D tensor).\n lengths = [len(cap) for cap in captions]\n targets = torch.zeros(len(captions), max(lengths)).long()\n for i, cap in enumerate(captions):\n end = lengths[i]\n targets[i, :end] = cap[:end]\n return images, targets, lengths", "def merge_comp(it):\n s = it[0]\n for i in it[1:]:\n s = s + i\n s = normalize_comp(s)\n return s", "def SNLI_collate_func(batch):\n x1_list = []\n x1_length_list = []\n x2_list = []\n x2_length_list = []\n label_list = []\n for datum in batch:\n x1_padded_vec = np.pad(np.array(datum[0]), \n pad_width=((0,MAX_SENTENCE_LENGTH-datum[1])), \n mode=\"constant\", constant_values=0)\n x1_list.append(x1_padded_vec)\n x1_length_list.append(datum[1])\n \n x2_padded_vec = np.pad(np.array(datum[2]), \n pad_width=((0,MAX_SENTENCE_LENGTH-datum[3])), \n mode=\"constant\", constant_values=0)\n x2_list.append(x2_padded_vec)\n x2_length_list.append(datum[3])\n \n label_list.append(datum[4])\n\n return [torch.from_numpy(np.array(x1_list)), torch.LongTensor(x1_length_list),\n torch.from_numpy(np.array(x2_list)), torch.LongTensor(x2_length_list),\n torch.LongTensor(label_list)]", "def safe_apply(row, fn):\n if row:\n return fn(row)\n else:\n return row", "def custom_collate_segmentation(\n batch: List[Dict[str, Tensor]], groundtruth: bool = True\n) -> Dict[str, Union[Any, List[Tensor]]]:\n\n batch = default_collate(batch)\n return batch", "def preprocess(self, inputs: InputsType, batch_size: int = 1, **kwargs):\n chunked_data = self._get_chunk_data(\n map(self.pipeline, inputs), batch_size)\n yield from map(self.collate_fn, chunked_data)", "def map_values_c(fun):\n return partial(map_values, fun)", "def preprocessing_transform(self, x):\n if isinstance(self.column_text, int) and self.column_text not in x.columns:\n col = self.column_text\n else:\n col = 
list(x.columns).index(self.column_text)\n\n ct = x.shape[0]\n # INPUTS\n if self.method_embedding.lower() in ['roberta', \"camembert\", \"xlm-roberta\"]:\n ids = np.ones((ct, self.maxlen), dtype='int32')\n else:\n ids = np.zeros((ct, self.maxlen), dtype='int32')\n att = np.zeros((ct, self.maxlen), dtype='int32')\n tok = np.zeros((ct, self.maxlen), dtype='int32')\n\n for k in range(ct):\n text = \" \" + \" \".join(x.iloc[k, col].split())\n\n if self.method_embedding == 'RoBERTa':\n enc = self.tokenizer.encode(text)\n else:\n enc = self.tokenizer.encode(text, max_length=self.maxlen, truncation=True)\n\n # CREATE BERT INPUTS\n if self.method_embedding == 'RoBERTa':\n ids[k, :len(enc.ids)] = enc.ids[:self.maxlen]\n att[k, :len(enc.ids)] = 1\n else:\n ids[k, :len(enc)] = enc\n att[k, :len(enc)] = 1\n\n x_preprocessed = [ids, att, tok]\n if self.dimension_embedding == 'word_embedding':\n return x_preprocessed\n else:\n model_extractor = self.model_extract_document_embedding()\n document_embedding = model_extractor.predict(x_preprocessed)\n return document_embedding", "def test_default(self):\n iterables = [xrange(4), xrange(7), xrange(3, 6)]\n eq_(sorted(reduce(list.__add__, [list(it) for it in iterables])),\n list(collate(*iterables)))", "def collate_fn(batch):\n # From\n # https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Object-Detection/blob/43fd8be9e82b351619a467373d211ee5bf73cef8/datasets.py#L60\n\n images = list()\n boxes = list()\n labels = list()\n\n for b in batch:\n if b[0] is not None:\n images.append(b[0])\n boxes.append(b[1])\n labels.append(b[2])\n\n if len(images) > 0:\n images = torch.stack(images, dim=0)\n\n return images, boxes, labels", "def mapfn(k, v):\n for row in v:\n # rellenar el codigo\n pass", "def collate_fn(list_samples):\n data = dict(outputs=None) # compliant with DataManager <collate_fn>\n data[\"inputs\"] = torch.stack([torch.from_numpy(sample[0]) for sample in list_samples], dim=0).float()\n data[\"labels\"] = torch.stack([torch.tensor(sample[1]) for sample in list_samples], dim=0).squeeze().float()\n return DataItem(**data)", "def collate_fn_bow(data, vocab_size):\n labels = torch.zeros(len(data), dtype=torch.long)\n ood_labels = torch.zeros(len(data), dtype=torch.long)\n rows, cols = [], []\n values = []\n for idx, (numerical_sent, label, is_ood) in enumerate(data):\n labels[idx] = label\n ood_labels[idx] = is_ood\n for num, cnt in zip(*np.unique(numerical_sent, return_counts=True)):\n rows.append(idx)\n cols.append(num)\n values.append(cnt)\n indices = np.vstack((rows, cols))\n\n i = torch.LongTensor(indices)\n v = torch.FloatTensor(values)\n batch = torch.sparse.FloatTensor(i, v, torch.Size((len(data), vocab_size)))\n return batch, labels, ood_labels", "def collate_fn(batch):\n # Unzip the batch\n imgs,qs, answers = list(zip(*batch))\n\n # concatenate the vectors\n imgs = torch.stack(imgs)\n \n #concatenate the labels\n q = torch.stack(qs)\n a = torch.stack(answers)\n \n return imgs, q, a", "def sort(self, key_func):\n pass", "def view(\n self,\n collate_fn: Union[callable, str] = \"batch_of_g_and_y\",\n *args,\n **kwargs\n ):\n # provide default collate function\n if isinstance(collate_fn, str):\n collate_fn = getattr(self, collate_fn)\n\n return torch.utils.data.DataLoader(\n dataset=self,\n collate_fn=collate_fn,\n *args,\n **kwargs,\n )", "def imap_c(func):\n return functools.partial(imap, func)", "def apply(self, fn, column_label):\n return [fn(v) for v in self[column_label]]", "def collate(filename):\r\n x=open(filename,\"r\")\r\n total_words=[]\r\n 
for line in x:\r\n line=line.strip(\"\\n\")\r\n line=line.split(\":\")\r\n if len(total_words)<1:\r\n total_words.append(line)\r\n else:\r\n x= len(total_words)\r\n if line[0] == total_words[x-1][0]:\r\n if int(line[1]) > int(total_words[x-1][len(total_words[x-1])-1]):\r\n total_words[x-1].append(line[1])\r\n else:\r\n total_words.append(line)\r\n y = open(\"collated_ids.txt\", \"w\")\r\n # for i in range(len(total_words)):\r\n # if len(total_words[i])<3:\r\n # total_words[i]=\":\".join(total_words[i])+\"\\n\"\r\n # else:\r\n # id=\" \".join(total_words[i][1:])\r\n # total_words[i]=total_words[i][0]+\":\"+id+\"\\n\"\r\n # y.writelines(total_words)\r\n for i in range(len(total_words)):\r\n id=\"\"\r\n for j in range(1,len(total_words[i])):\r\n id=id +total_words[i][j] +\" \"\r\n y.write(str(total_words[i][0]) + \":\" +str(id) + \"\\n\")", "def lt_inplace(a,b):", "def coco_collate_fn(batch):\n all_imgs, all_objs, all_boxes, all_masks, all_obj_to_img = [], [], [], [], []\n\n for i, (img, objs, boxes, masks) in enumerate(batch):\n all_imgs.append(img[None])\n O = objs.size(0)\n all_objs.append(objs)\n all_boxes.append(boxes)\n all_masks.append(masks)\n\n all_obj_to_img.append(torch.LongTensor(O).fill_(i))\n\n all_imgs = torch.cat(all_imgs)\n all_objs = torch.cat(all_objs)\n all_boxes = torch.cat(all_boxes)\n all_masks = torch.cat(all_masks)\n all_obj_to_img = torch.cat(all_obj_to_img)\n\n out = (all_imgs, all_objs, all_boxes, all_masks, all_obj_to_img)\n\n return out" ]
[ "0.7459662", "0.669416", "0.64607346", "0.63689786", "0.6184213", "0.61835617", "0.6163743", "0.59677935", "0.5942852", "0.5942852", "0.58277845", "0.58204836", "0.56743795", "0.56363", "0.5553087", "0.55181116", "0.54813635", "0.5477117", "0.54744", "0.5447483", "0.5447414", "0.54047096", "0.53677654", "0.53488827", "0.53449506", "0.5338551", "0.52991295", "0.52430314", "0.52096593", "0.5183252", "0.5171824", "0.51715916", "0.5168523", "0.5162396", "0.5073969", "0.50646406", "0.50640947", "0.5057045", "0.5048437", "0.5048018", "0.5043405", "0.4976133", "0.49303642", "0.4930298", "0.49219936", "0.49044666", "0.49030912", "0.4897656", "0.48963633", "0.4895329", "0.48874164", "0.48826182", "0.48656413", "0.48276892", "0.48118424", "0.48053464", "0.47816068", "0.4777931", "0.4770731", "0.47563848", "0.47563848", "0.47563848", "0.47516024", "0.47484228", "0.4730394", "0.4729753", "0.47140285", "0.47107002", "0.47103003", "0.4704247", "0.46996218", "0.4696189", "0.46714115", "0.46649793", "0.4660777", "0.4656031", "0.464647", "0.46439993", "0.46433017", "0.46415946", "0.4632917", "0.46281442", "0.46167183", "0.46151686", "0.46051633", "0.459912", "0.459778", "0.4593689", "0.45825016", "0.45808205", "0.45576066", "0.455603", "0.45486185", "0.4532311", "0.45280224", "0.4513397", "0.45061004", "0.44969144", "0.4492369", "0.4487571", "0.44442326" ]
0.0
-1
Subsample the training data (for low resource experiments)
def subsample_training_set(self, k, seed=None):
    if seed is not None:
        rng_state = th.random.get_rng_state()
        with th.random.fork_rng():
            th.manual_seed(seed)
            self._subsample_training_set(k)
        if any(th.random.get_rng_state() != rng_state):
            raise ValueError("Bad RNG state")
    else:
        self._subsample_training_set(k)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subsample(self, dataset):\n sample_idx = np.random.choice(\n dataset.shape[0], self.sample_size, replace=True)\n sample = dataset[sample_idx,...]\n return sample", "def subsampleData(self, count):\n size = 0\n for block in self.blocks: size += len(block[1])\n subset = numpy.random.permutation(size)[:count]\n subset.sort()\n\n pos = 0\n index = 0\n ret = Dataset()\n for block in self.blocks:\n while subset[index]<(pos+len(block[1])):\n loc = subset[index] - pos\n ret.add(block[0][loc,:], block[1][loc])\n index += 1\n if index==subset.shape[0]: return ret\n pos += len(block[1])\n \n return ret", "def loadtrainData_undersampling():\n train = []\n fileIn = open(PATH + 'traindata_Subtask4.txt')\n for line in fileIn.readlines():\n lineArr = line.strip().split()\n train.append([float(lineArr[i]) for i in range(len(lineArr))])\n\n pos = []\n neg = []\n for i in train:\n if i[-1] == 1.0:\n pos.append(i)\n else:\n neg.append(i)\n slice1 = random.sample(neg, len(pos))\n data = pos + slice1\n train_x = []\n train_y = []\n y = []\n for line in data:\n train_x.append([float(line[i]) for i in range(len(line) - 1)])\n y.append([int(line[-1])])\n for i in range(len(y)):\n train_y.append(y[i][0])\n return np.mat(train_x), np.mat(train_y).transpose()", "def loadtrainData_oversampling():\n pre_x = []\n pre_y = []\n fileIn = open(PATH + 'traindata_Subtask4.txt')\n for line in fileIn.readlines():\n lineArr = line.strip().split()\n pre_x.append([float(lineArr[i]) for i in range(len(lineArr) - 1)])\n pre_y.append(int(lineArr[-1]))\n ros = RandomOverSampler(random_state=0)\n sampl_x, sampl_y = ros.fit_sample(pre_x, pre_y)\n return np.mat(sampl_x), np.mat(sampl_y).transpose()", "def subsampling(dat: pd.DataFrame):\n if dat.shape[0] > 10000:\n return dat.sample(n=10000, random_state=1).reset_index(drop=True)\n else:\n return dat", "def sample_train(self, sample_frac):\n df_tr = self.get_dataset_type_df('train')\n original_col_cnt = len(df_tr)\n # Set the test records aside\n df_te = self.get_dataset_type_df('test')\n df_tr = df_tr.sample(frac=sample_frac)\n self.df = pd.concat([df_tr, df_te])\n logging.info(\"Sampled training set from {} to {} rows, fraction={:0.1%}\".format(original_col_cnt, len(df_tr), len(df_tr)/original_col_cnt))", "def downsample_data(\n downsampled_frac,\n fore_train_ip,\n fore_train_op,\n fore_valid_ip,\n fore_valid_op,\n train_ip,\n train_op,\n valid_ip,\n valid_op,\n test_ip,\n test_op,\n):\n\n np.random.seed(2023)\n tmp_tr_id = np.random.choice(\n len(fore_train_op),\n int(len(fore_train_op) * downsampled_frac),\n replace=False,\n )\n np.random.seed(2023)\n tmp_val_id = np.random.choice(\n len(fore_valid_op),\n int(len(fore_valid_op) * downsampled_frac),\n replace=False,\n )\n\n fore_train_ip = [x[tmp_tr_id] for x in fore_train_ip]\n fore_train_op = fore_train_op[tmp_tr_id]\n fore_valid_ip = [x[tmp_val_id] for x in fore_valid_ip]\n fore_valid_op = fore_valid_op[tmp_val_id]\n\n np.random.seed(2023)\n tmp_tr_id = np.random.choice(\n len(train_op), int(len(train_op) * downsampled_frac), replace=False\n )\n np.random.seed(2023)\n tmp_val_id = np.random.choice(\n len(valid_op), int(len(valid_op) * downsampled_frac), replace=False\n )\n np.random.seed(2023)\n tmp_test_id = np.random.choice(\n len(test_op), int(len(test_op) * downsampled_frac), replace=False\n )\n\n train_ip = [x[tmp_tr_id] for x in train_ip]\n train_op = train_op[tmp_tr_id]\n valid_ip = [x[tmp_val_id] for x in valid_ip]\n valid_op = valid_op[tmp_val_id]\n test_ip = [x[tmp_test_id] for x in test_ip]\n test_op = 
test_op[tmp_test_id]\n\n return (\n fore_train_ip,\n fore_train_op,\n fore_valid_ip,\n fore_valid_op,\n train_ip,\n train_op,\n valid_ip,\n valid_op,\n test_ip,\n test_op,\n )", "def subsampling(dataset, class_column_index, class_max_count, class_dict):\n out = []\n for row in dataset:\n cls = row[class_column_index]\n rInt = np.random.randint(0, class_dict[cls])\n if rInt <= class_max_count:\n out.append(row)\n ss_data = np.array(out)\n\n return ss_data", "def _subsample(\n data: Dict[str, np.ndarray],\n sample_count: int,\n) -> Dict[str, np.ndarray]:\n count = data[\"input\"].shape[0]\n samples = np.random.choice(count, sample_count, replace=False)\n return {\n \"input\": data[\"input\"][samples, :],\n \"output\": data[\"output\"][samples, :],\n }", "def split_samples(data):\n\n training_samples = data[0:9497]\n test_samples = data[9497:11300]\n\n return training_samples, test_samples", "def subsample(inputs, factor, name=None):\n if factor == 1:\n return inputs\n else:\n return max_pool(inputs, filter_size=(1, 1), stride=(factor, factor), name=name)", "def _sample_mini_dataset(dataset, num_classes, num_shots):\n shuffled = list(dataset)\n random.shuffle(shuffled)\n for class_idx, class_obj in enumerate(shuffled[:num_classes]):\n for sample in class_obj.sample(num_shots):\n yield (sample, class_idx)", "def _down_sample(self):\n self._subsamples = self._raw_data.samples[::self._down_sample_factor]\n # Neglects the redundant subsamples in the tails.\n if len(self._subsamples) >= self._number_of_subsamples:\n self._subsamples = self._subsamples[:self._number_of_subsamples]\n if not len(self._subsamples) == self._number_of_subsamples:\n raise WaveformError(\n 'Number of subsample is %r, while %r is expected' % (\n len(self._subsamples), self._number_of_subsamples))\n logging.debug('down-samples: %r', self._subsamples)", "def mask_test_train(data, split): \n # create a copy of the full data for reduction\n training_set = data.copy()\n\n # find index of values which are not empty\n nonzero_inds = training_set.nonzero()\n\n # create list of index pairs\n nonzero_pairs = list(zip(nonzero_inds[0], nonzero_inds[1]))\n\n # calculate the number of samples to be removed in training set\n num_samples = int(np.ceil(split*len(nonzero_pairs)))\n\n # get random samples\n samples = random.sample(nonzero_pairs, num_samples)\n\n # remove selected samples in training set\n user_inds = [index[0] for index in samples]\n item_inds = [index[1] for index in samples]\n training_set[user_inds, item_inds] = 0 \n\n return training_set, list(set(user_inds)), np.array(samples)", "def random_sampling(self, n_subset):\n t = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n print(\"[INFO] {} - Random sampling with replacement ...\".format(t))\n subset_list = []\n training_set = self\n subset_size = math.ceil(training_set.n_samples / n_subset)\n # create subsets\n for i in range(n_subset):\n # run a permutation to mix all samples (sampling with replacement)\n self.permutation()\n # always draw the first samples\n start_idx = 0\n stop_idx = subset_size\n subset = deepcopy(training_set)\n subset.data = subset.data[start_idx:stop_idx][:]\n subset.labels = subset.labels[start_idx:stop_idx][:]\n subset.labels_onehot = subset.labels_onehot[start_idx:stop_idx][:]\n subset.n_samples = stop_idx - start_idx\n subset.true_distribution = subset._get_true_distribution()\n subset.set_batch_size(training_set.batch_size)\n subset_list.append(subset)\n print(\"\\tSubset shape {}\".format(subset.data.shape))\n return subset_list", "def 
load_susy(trainsize=500, testsize=1000):\n filename = 'datasets/susysubset.csv'\n dataset = loadcsv(filename)\n trainset, testset = splitdataset(dataset, trainsize, testsize)\n return trainset, testset", "def subsample(inputs, factor, scope=None):\n if factor == 1:\n return inputs\n else:\n return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)", "def split_train_test_data(total_data_df, frac):\n test_data_df = total_data_df.sample(frac=frac, random_state=1)\n train_data_df = total_data_df.loc[total_data_df.index.difference(test_data_df.index)]\n return train_data_df, test_data_df", "def load_subsampled_clouds(self, subsampling_parameter):\n\n if 0 < subsampling_parameter <= 0.01:\n raise ValueError('subsampling_parameter too low (should be over 1 cm')\n\n # Initiate containers\n self.input_points = {'training': [], 'validation': [], 'test': []}\n self.input_normals = {'training': [], 'validation': [], 'test': []}\n self.input_labels = {'training': [], 'validation': []}\n\n ################\n # Training files\n ################\n\n # Restart timer\n t0 = time.time()\n\n # Load wanted points if possible\n print('\\nLoading training points')\n filename = join(self.path, 'train_{:.3f}_record.pkl'.format(subsampling_parameter))\n\n if exists(filename):\n with open(filename, 'rb') as file:\n self.input_points['training'], \\\n self.input_normals['training'], \\\n self.input_labels['training'] = pickle.load(file)\n\n # Else compute them from original points\n else:\n\n # Collect training file names\n names = np.loadtxt(join(self.path, self.data_folder, 'modelnet40_train.txt'), dtype=np.str)\n\n # Collect point clouds\n for i, cloud_name in enumerate(names):\n\n # Read points\n class_folder = '_'.join(cloud_name.split('_')[:-1])\n txt_file = join(self.path, self.data_folder, class_folder, cloud_name) + '.txt'\n data = np.loadtxt(txt_file, delimiter=',', dtype=np.float32)\n\n # Subsample them\n if subsampling_parameter > 0:\n points, normals = grid_subsampling(data[:, :3],\n features=data[:, 3:],\n sampleDl=subsampling_parameter)\n else:\n points = data[:, :3]\n normals = data[:, 3:]\n\n # Add to list\n self.input_points['training'] += [points]\n self.input_normals['training'] += [normals]\n\n # Get labels\n label_names = ['_'.join(name.split('_')[:-1]) for name in names]\n self.input_labels['training'] = np.array([self.name_to_label[name] for name in label_names])\n\n # Save for later use\n with open(filename, 'wb') as file:\n pickle.dump((self.input_points['training'],\n self.input_normals['training'],\n self.input_labels['training']), file)\n\n lengths = [p.shape[0] for p in self.input_points['training']]\n sizes = [l * 4 * 6 for l in lengths]\n print('{:.1f} MB loaded in {:.1f}s'.format(np.sum(sizes) * 1e-6, time.time() - t0))\n\n ############\n # Test files\n ############\n\n # Restart timer\n t0 = time.time()\n\n # Load wanted points if possible\n print('\\nLoading test points')\n filename = join(self.path, 'test_{:.3f}_record.pkl'.format(subsampling_parameter))\n if exists(filename):\n with open(filename, 'rb') as file:\n self.input_points['validation'], \\\n self.input_normals['validation'], \\\n self.input_labels['validation'] = pickle.load(file)\n\n # Else compute them from original points\n else:\n\n # Collect test file names\n names = np.loadtxt(join(self.path, self.data_folder, 'modelnet40_test.txt'), dtype=np.str)\n\n # Collect point clouds\n for i, cloud_name in enumerate(names):\n\n # Read points\n class_folder = '_'.join(cloud_name.split('_')[:-1])\n txt_file = 
join(self.path, self.data_folder, class_folder, cloud_name) + '.txt'\n data = np.loadtxt(txt_file, delimiter=',', dtype=np.float32)\n\n # Subsample them\n if subsampling_parameter > 0:\n points, normals = grid_subsampling(data[:, :3],\n features=data[:, 3:],\n sampleDl=subsampling_parameter)\n else:\n points = data[:, :3]\n normals = data[:, 3:]\n\n # Add to list\n self.input_points['validation'] += [points]\n self.input_normals['validation'] += [normals]\n\n\n # Get labels\n label_names = ['_'.join(name.split('_')[:-1]) for name in names]\n self.input_labels['validation'] = np.array([self.name_to_label[name] for name in label_names])\n\n # Save for later use\n # Save for later use\n with open(filename, 'wb') as file:\n pickle.dump((self.input_points['validation'],\n self.input_normals['validation'],\n self.input_labels['validation']), file)\n\n lengths = [p.shape[0] for p in self.input_points['validation']]\n sizes = [l * 4 * 6 for l in lengths]\n print('{:.1f} MB loaded in {:.1f}s\\n'.format(np.sum(sizes) * 1e-6, time.time() - t0))\n\n small = False\n if small:\n\n for split in ['training', 'validation']:\n\n pick_n = 50\n gen_indices = []\n for l in self.label_values:\n label_inds = np.where(np.equal(self.input_labels[split], l))[0]\n if len(label_inds) > pick_n:\n label_inds = label_inds[:pick_n]\n gen_indices += [label_inds.astype(np.int32)]\n gen_indices = np.hstack(gen_indices)\n\n self.input_points[split] = np.array(self.input_points[split])[gen_indices]\n self.input_normals[split] = np.array(self.input_normals[split])[gen_indices]\n self.input_labels[split] = np.array(self.input_labels[split])[gen_indices]\n\n if split == 'training':\n self.num_train = len(gen_indices)\n else:\n self.num_test = len(gen_indices)\n\n # Test = validation\n self.input_points['test'] = self.input_points['validation']\n self.input_normals['test'] = self.input_normals['validation']\n self.input_labels['test'] = self.input_labels['validation']\n\n return", "def trainSet(self):\r\n self.currIdx = 0\r\n random.shuffle(self.trainSamples)\r\n self.samples = self.trainSamples[:self.numTrainSamplesPerEpoch]", "def bootstrap(data):\r\n size = int(len(data))\r\n train = resample(data, n_samples=size, replace=True)\r\n test = data.drop(train.index) \r\n return train[encoded_features], train[target], test[encoded_features], test[target]", "def splitData(data, class_label, seed, ratio):\n\t\n\trandom.seed(seed)\n\tsubset = data.clone()\n\tsize_data = subset.data.shape[0]\n\tn = int(np.floor(size_data * ratio)) # number of datasets in train\n\tindex = random.sample(range(1, size_data), n)\n\tsplit_list = [item for item in [0] for i in range(size_data)]\n\t\n\tfor i in index:\n\t\tsplit_list[i]=1\n\t\n\treturn split_list #returns list of indeces where 0 is test and 1 is training data ", "def downsample_sam(self, factor):", "def train_test_samples(df):\n\n from math import floor\n\n shuffled_df = df.reindex(np.random.permutation(df.index))\n\n seventy_five_percent = int(floor(len(shuffled_df) * 0.75))\n train_df = shuffled_df.iloc[:seventy_five_percent, ]\n test_df = shuffled_df.iloc[seventy_five_percent:, ]\n\n return train_df, test_df", "def split_dataset(dataset, test_size):\n train_data = dataset.skip(test_size).shuffle(SHUFFLE_BUFFER_SIZE)\n train_data = train_data.padded_batch(BATCH_SIZE)\n \n test_data = dataset.take(test_size)\n test_data = test_data.padded_batch(BATCH_SIZE)\n \n return train_data, test_data", "def sample(self, batch_size):\n raise NotImplementedError", "def train_test_split(dataset, 
split):\r\n train = list()\r\n train_size = split * len(dataset)\r\n dataset_copy = list(dataset) \r\n while len(train) < train_size:\r\n index = randrange(len(dataset_copy))\r\n train.append(dataset_copy.pop(index))\r\n return train, dataset_copy", "def split_data_into_train_and_test(raw_training_data):\n train_set, test_set = train_test_split(raw_training_data, test_size=0.2, random_state=42)\n return train_set, test_set", "def subsample():\n\n nwav = 872\n nrow = 1600\n ncol = 1560\n\n fpath = os.path.join(HYSS_ENVIRON['HYSS_WRITE'],'raw_binned/nrow1600')\n fnames = ['full_frame_20ms_faster_VNIR_1600.raw',\n 'full_frame_20ms_faster_VNIR_1600_flat.raw']\n\n for fname in fnames:\n print(\"SUBSAMPLE: reading data from {0}\".format(fpath))\n print(\"SUBSAMPLE: {0}\".format(fname))\n data = np.fromfile(os.path.join(fpath,fname)).reshape(nwav,nrow,ncol)\n\n for fac in [2,4,8]:\n trow = '{0:04}'.format(1600/fac)\n opath = os.path.join(HYSS_ENVIRON['HYSS_WRITE'],'raw_subsample',\n 'nrow'+trow)\n oname = fname.replace('1600',trow)\n\n print(\"SUBSAMPLE: writing subsampled data to {0}\".format(opath))\n print(\"SUBSAMPLE: {0}\".format(oname))\n data[:,::fac,::fac].tofile(open(os.path.join(opath,oname),'wb'))\n\n return", "def split_dataset(self, test_size=0.20):\n\t\t(self.training_data, self.test_data, self.training_labels, self.test_labels) = train_test_split(self.training_data, self.training_labels, test_size=test_size)", "def process_sample_train(self):\n raise NotImplementedError", "def subsample(y, limit=256, factor=2):\n if len(y) > limit:\n return y[::factor].reset_index(drop=True)\n return y", "def split_data(dataset, test_size=0.5):\n shuffled_data = np.random.RandomState(seed=721).permutation(dataset)\n train_set = shuffled_data[: int(len(dataset) * (1 - test_size)), :]\n test_set = shuffled_data[int(len(dataset) * (1 - test_size)):, :]\n return train_set, test_set", "def split(self, train_fraction=0.8, val_fraction=0.2, test_fraction=0, seed=1):\n if self.is_initialized():\n return\n self.ensure_fraction_sum(train_fraction, val_fraction, test_fraction)\n np.random.seed(seed)\n self.samples = sorted(self.samples)\n np.random.shuffle(self.samples)\n train_idx = ceil(train_fraction*(len(self.samples)))\n val_idx = train_idx + ceil(val_fraction*(len(self.samples)))\n test_idx = val_idx + ceil(test_fraction*(len(self.samples)))\n indices = list(range(len(self.samples)))\n self.indices[TRAIN_SUBSET] = indices[:train_idx]\n self.indices[VAL_SUBSET] = indices[train_idx:val_idx]\n self.indices[TEST_SUBSET] = indices[val_idx:test_idx]", "def _set_number_of_subsamples(self, number_of_subsamples):\n self._number_of_subsamples = number_of_subsamples\n self._compute_down_sample_factor()", "def run_train_test_split():\n # Load all documents\n conn = sq.connect(config.DB_FILE)\n documents = pd.read_sql_query('select pubmed_id, review_id, included, title, abstract from article ', conn)\n\n # Identify unique review IDs\n review_ids = documents['review_id'].unique()\n\n # Set seed for random sampling\n np.random.seed(2)\n\n # List of Reviews in the partial data set and full data set\n partial_set = list(np.random.choice(review_ids, 10, replace=False))\n full_set = list(review_ids.copy())\n\n # Load array (X) and labels (Y) of all documents\n with (open(config.DOC_TERM_MATRIX, \"rb\")) as openfile:\n X = pickle.load(openfile)\n\n y = documents['included']\n\n # Train-test split of the partial dataset\n train_test_split(X, y, partial_set, 'min_max', 'partial', review_ids)\n train_test_split(X, y, 
partial_set, 'tf_idf', 'partial', review_ids)\n\n # Train-test split of the full dataset\n train_test_split(X, y, full_set, 'min_max', 'full', review_ids)\n train_test_split(X, y, full_set, 'tf_idf', 'full', review_ids)", "def spy_train_dataset():\n train_p = np.load(\"./processed_data/train/raw/train_p.npy\")\n train_u = np.load(\"./processed_data/train/raw/train_u.npy\")\n print(train_p.shape)\n print(train_u.shape)\n\n np.random.shuffle(train_p)\n spy = train_p[: int(_spy_rate * train_p.shape[0]), :]\n spy[:, -1] = 0\n\n spy_u = np.concatenate([train_u, spy])\n spy_p = np.copy(train_p[int(_spy_rate * train_p.shape[0]):])\n print(spy_p.shape)\n print(spy_u.shape)\n spy_train = np.concatenate([spy_p, spy_u])\n np.random.shuffle(spy_train)\n np.save(\"./processed_data/train/spy/train.npy\", spy_train)\n print(spy_train.shape)", "def test_random_forest_min_samples_split_parameter(params, X_train, X_test, y_train, y_test):", "def downsampling(x_train, y_train, random_state=42):\n sampling = pd.concat([x_train, y_train], axis=1)\n big = sampling[y_train == y_train.value_counts().index[0]]\n small = sampling[y_train == y_train.value_counts().index[1]]\n\n downsampled = resample(big,\n replace=False,\n n_samples=len(small),\n random_state=random_state)\n downsampled = pd.concat([downsampled, small])\n x_train_bal = downsampled[downsampled.columns.values[:-1]]\n y_train_bal = downsampled[downsampled.columns.values[-1]]\n\n del sampling, big, small, downsampled\n return x_train_bal, y_train_bal", "def subsample(self, se):\n\t\tdf = ReadDF('noname', self.readdict.refmap)\n\t\tfor i in random.sample(xrange(1, self.n+1), min(se, self.n)):\n\t\t\tpos, read = self.partial_sampling_func(i)\n\t\t\tdf.add_read_to_vec(read,copy=1) # important to remember to use just this ONE copy!!!\n\t\treturn df", "def _resample_data(self, X, y, N):\n if N > 0:\n # Split labels into set of indexes for each class\n class_idxs = [np.where(y == c)[0] for c in np.unique(y)]\n\n # Shuffle each of sets of indexes\n [np.random.shuffle(i) for i in class_idxs]\n\n # Take N indexes, or fewer if total is less than N\n subset_idx = [i[:N] if len(i) >= N else i for i in class_idxs]\n\n # Use advanced indexing to get subsets of X and y\n idxs = np.array(subset_idx).ravel()\n np.random.shuffle(idxs)\n X, y = X[idxs], y[idxs]\n\n return X, y", "def no_overfitting(self):\n\n # Instance with minimun length should be the maximum length\n train_len = []\n [train_len.append(st['Nevents']) for st in self.stats]\n train_len = np.array(train_len)\n max_len = train_len[train_len != 0].min()\n\n # CROPS FEATURE SAMPLES\n onpower_train = pd.DataFrame()\n offpower_train = pd.DataFrame()\n duration_train = pd.DataFrame()\n start = 0\n end = 0\n for ind in np.arange(len(self.stats)):\n if self.stats[ind]['Nevents'] != 0:\n if ind == 0:\n start = 0\n else:\n start = end\n end += self.stats[ind]['Nevents']\n\n aux = self.onpower_train[start:end]\n aux = aux[:max_len]\n onpower_train = pd.concat([onpower_train, aux])\n\n aux = self.offpower_train[start:end]\n aux = aux[:max_len]\n offpower_train = pd.concat([offpower_train, aux])\n\n aux = self.duration_train[start:end]\n aux = aux[:max_len]\n duration_train = pd.concat([duration_train, aux])\n\n # udating stats:\n self.stats[ind]['Nevents'] = max_len\n\n self.onpower_train = onpower_train\n self.offpower_train = offpower_train\n self.duration_train = duration_train\n\n # RE-TRAINS FEATURES:\n self.__retrain(self.onpower, self.onpower_train)\n self.__retrain(self.offpower, self.offpower_train)\n 
self.__retrain(self.duration, self.duration_train)", "def oversampling_experiment():\n model, history = train.train(BATCH_SIZE, EPOCHS, print_model_summary=True,\n oversampling=True)\n evaluate_both(model)\n plotting.plot_metrics(history)", "def split_train_test(ratings):\r\n ratings = ratings.sample(frac=1).reset_index(drop=True)\r\n train_user_list = []\r\n train_item_list = []\r\n train_rating_list = []\r\n test_user_list = []\r\n test_item_list = []\r\n test_rating_list = []\r\n user_pool = set(ratings['userId'].unique())\r\n for idx in user_pool:\r\n flag = 0\r\n items = ratings[ratings['userId']==idx][['itemId','rating']]\r\n for i, row in items.iterrows():\r\n if flag == 0:\r\n test_user_list.append(int(idx))\r\n test_item_list.append(int(row['itemId']))\r\n test_rating_list.append(row['rating'])\r\n flag = 1\r\n else:\r\n train_user_list.append(int(idx))\r\n train_item_list.append(int(row['itemId']))\r\n train_rating_list.append(row['rating'])\r\n\r\n train = pd.DataFrame({'userId': train_user_list, 'itemId': train_item_list, 'rating': train_rating_list}, columns=['userId', 'itemId', 'rating'])\r\n test = pd.DataFrame({'userId': test_user_list, 'itemId': test_item_list, 'rating': test_rating_list}, columns=['userId', 'itemId', 'rating'])\r\n return [train, test]\r\n \r\n\r\n \r\n #train, test = train_test_split(ratings, test_size=0.1, shuffle=True)\r\n #return [train, test]\r", "def split_data(test_data, split_ratio):\n split_index = int(split_ratio * len(test_data))\n \n # randomly permute the values in place\n random.shuffle(test_data)\n \n # take slices of the determined size\n training_set = copy.copy(test_data[:split_index])\n test_data = copy.copy(test_data[split_index:])\n\n return training_set, test_data", "def reduce_sample_size(data,classes,times=2):\n data=data[range(0,data.shape[0],times)]\n classes=classes[range(0,classes.shape[0],times)]\n return data,classes", "def split_dataset(dataset, test_size):\r\n random.shuffle(dataset)\r\n \r\n rating_negativ = []\r\n rating_positiv = []\r\n \r\n for row in dataset:\r\n if int(row[1]) == 0:\r\n rating_negativ.append(row)\r\n elif int(row[1]) == 1:\r\n rating_positiv.append(row)\r\n\r\n random.shuffle(rating_positiv)\r\n random.shuffle(rating_negativ) \r\n \r\n neg_train_data, neg_val_data = train_test_split(rating_negativ, test_size=test_size)\r\n pos_train_data, pos_val_data = train_test_split(rating_positiv, test_size=test_size)\r\n \r\n train_data = neg_train_data + pos_train_data\r\n val_data = neg_val_data + pos_val_data\r\n \r\n random.shuffle(train_data)\r\n random.shuffle(val_data)\r\n \r\n return train_data, val_data", "def downsample_data(dataset):\n loss = dataset.loc[dataset[TARGET] == 'loss']\n good_gain = dataset.loc[dataset[TARGET] == 'good_gain']\n \n sample_size = min([loss.shape[0], good_gain.shape[0]])\n loss = loss.sample(n=sample_size, random_state=42)\n good_gain = good_gain.sample(n=sample_size, random_state=42)\n \n frames = [loss, good_gain]\n return shuffle(pd.concat(frames), random_state=0)", "def get_training_and_testing_sets(data, Y):\r\n data = pd.concat([data, Y], axis=1)\r\n x,y=data.shape\r\n train_X_sub1=data[0:x//6]\r\n dev_X_sub1 = data[x//6:x//6 + x//12]\r\n test_X_sub1 = data[x//6 + x//12:x//3]\r\n\r\n train_X_sub2 = data[x//3:x//3+x//6]\r\n dev_X_sub2 = data[x//6 + x//3:x//3 + x//6 + x//12]\r\n test_X_sub2 = data[x//3 + x//6 + x//12:2*x//3]\r\n\r\n train_X_sub3 = data[2*x//3:(2*x//3) +x//6]\r\n dev_X_sub3 = data[x//6 + 2*x//3: (2*x//3) + x//6 + x//12]\r\n test_X_sub3 = data[2*x//3 + x//6 
+ x//12:x]\r\n\r\n train_X=train_X_sub1.append(train_X_sub2,ignore_index = True)\r\n train_X =train_X.append(train_X_sub3,ignore_index = True)\r\n dev_X= dev_X_sub1.append(dev_X_sub2,ignore_index = True)\r\n dev_X = dev_X.append(dev_X_sub3,ignore_index = True)\r\n test_X = test_X_sub1.append(test_X_sub2,ignore_index = True)\r\n test_X = test_X.append(test_X_sub3,ignore_index = True)\r\n\r\n\r\n train_X = util.shuffle(train_X)\r\n train_X = train_X.reset_index(drop=True)\r\n\r\n dev_X = util.shuffle(dev_X)\r\n dev_X = dev_X.reset_index(drop=True)\r\n\r\n test_X = util.shuffle(test_X)\r\n test_X = test_X.reset_index(drop=True)\r\n\r\n train_X_final=train_X\r\n dev_X_final = dev_X\r\n test_X_final = test_X\r\n x, y = train_X_final.shape\r\n train_X = train_X_final.iloc[:, 0:y - 1]\r\n train_Y = train_X_final.iloc[:, y - 1]\r\n\r\n x, y = test_X_final.shape\r\n test_X = test_X_final.iloc[:, 0:y - 1]\r\n test_Y = test_X_final.iloc[:, y - 1]\r\n\r\n x, y = dev_X_final.shape\r\n dev_X = dev_X_final.iloc[:, 0:y - 1]\r\n dev_Y = dev_X_final.iloc[:, y - 1]\r\n\r\n return train_X, train_Y, dev_X,dev_Y,test_X, test_Y", "def train_test_split(ratings):\n test = set(range(len(ratings))[::1000])\n train = sorted(set(range(len(ratings))) - test)\n test = sorted(test)\n return ratings.iloc[train], ratings.iloc[test]", "def regular_subsample(neuron):\n # select all the main points\n selected_index = get_main_points(neuorn)\n\n # Computing the parent id of the selected nodes\n neuron = neuron_with_selected_nodes(selected_index)\n return neuron", "def split_data(self,test=False):\n shuffle_index = torch.randperm(self.train_target.shape[0])\n load = shuffle_index.shape[0]\n train_input_shuffle = self.train_input[shuffle_index]\n train_target_shuffle = self.train_target[shuffle_index]\n train_classes_shuffle = self.train_classes[shuffle_index]\n index_train = self.index_for_equal_class(train_target_shuffle[:load//2])\n train_input = train_input_shuffle[index_train]\n train_target = train_target_shuffle[index_train]\n train_classes = train_classes_shuffle[index_train]\n if not test:\n index_test = self.index_for_equal_class( train_target_shuffle[load//2:]) + load//2\n test_input = train_input_shuffle[index_test]\n test_target = train_target_shuffle[index_test]\n test_classes = train_classes_shuffle[index_test]\n else:\n index_test = self.index_for_equal_class(self.test_target)\n test_input = self.test_input[index_test]\n test_target = self.test_target[index_test]\n test_classes = self.test_classes[index_test]\n train_input, mean, std = normalize(train_input)\n test_input, _, _ = normalize(test_input,mean,std)\n return train_input, train_target, train_classes ,test_input ,test_target ,test_classes", "def bootstrap_data(self):\n for i in range(self.bootstraps):\n df_i = self.training_df.groupby(\n self.random_effect, group_keys=False\n ).apply(\n lambda x: x.sample(len(x), replace=True)\n )\n self.models.append(self.convert(df=df_i))", "def prepare_data(path_to_data=PATH_TO_DATA, batch_size=BATCH_SIZE,\n img_size=IMG_SIZE, subsample=None, shuffle=True):\n if subsample is not None:\n idx = np.arange(5000)\n np.random.shuffle(idx)\n sampler = SubsetRandomSampler(idx[:subsample])\n shuffle = False\n else:\n sampler = None\n transform = transforms.Compose([\n # transforms.RandomHorizontalFlip(p=0.5),\n # transforms.RandomRotation((-20, 20)),\n # transforms.Resize(img_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n training_data = torchvision.datasets.ImageFolder(\n 
path_to_data, transform=transform)\n print(\"Length of data: \", len(training_data))\n\n training_loader = torch.utils.data.DataLoader(\n training_data, batch_size=batch_size,\n sampler=sampler, shuffle=shuffle)\n return training_loader", "def split_dataset(samples, ratio=0.8):\n nsamples = len(samples)\n num_train = int(ratio*nsamples)\n\n # shuffle samples\n shuffle(samples)\n\n trainset = samples[:num_train]\n testset = samples[num_train:]\n\n return trainset, testset", "def bootstrap_sample(data):\n return [random.choice(data) for _ in data]", "def test_sample_rows():\n ratings = lktu.ml_test.ratings\n ratings = ratings.set_index('user') ##forces non-unique index\n with pytest.raises(ValueError):\n for split in xf.sample_rows(ratings, partitions=5, size=1000):\n pass", "def random_split(X, test_rate=0.3):\n n_sample = X.shape[0]\n test_size = int(n_sample * test_rate)\n train_size = n_sample - test_size\n all_indices = list(range(n_sample))\n np.random.shuffle(all_indices) \n all_indices = np.array(all_indices)\n return all_indices[:train_size], all_indices[train_size:]", "def sample_train_batch(self):\r\n batch = []\r\n labels =[]\r\n num_groups = self.batch_size // self.batch_k\r\n sampleed_classes = np.random.choice(self.train_class_ids,num_groups,replace=False)\r\n for class_id in sampleed_classes:\r\n img_fname = np.random.choice(self.train_image_files[class_id],self.batch_k,replace=False)\r\n batch += img_fname.tolist()\r\n labels += [class_id]*self.batch_k\r\n return batch,labels", "def randomSubData(self, number): \n if number < 0 or number > self.__numExamples: \n raise ValueError(\"Random subset size must be between 0 and \" + str(self.__numExamples))\n \n self.__exampleIndices = array(sample(list(range(0, self.__numExamples)), number))", "def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling", "def _compute_quantized_subsamples(self):\n self._down_sample()\n self._quantize()", "def split_data(data, test_size):\r\n ntest = int(round(len(data) * (1 - test_size)))+1\r\n \r\n train, test = data[:ntest], data[ntest:]\r\n \r\n return train,test", "def overSampling( self, feature, Class, random_state = 0 ):\n oversampler = SMOTE(random_state=0)\n feature_resample, Class_resample = oversampler.fit_sample(feature, \n Class)\n print(\"Warning: You are increasing the dataset to balance the data\\n\")\n return feature_resample, Class_resample", "def downsample(self, number):\n for num, ss in enumerate(self.samples):\n self.samples[num], self.extra_kwargs[num] = _downsample(\n ss, number, extra_kwargs=self.extra_kwargs[num]\n )", "def load_subsampled_clouds(self, subsampling_parameter):\n\n if 0 < subsampling_parameter <= 0.01:\n raise ValueError('subsampling_parameter too low (should be over 1 cm')\n\n # Create path for files\n tree_path = join(self.path, 'input_{:.3f}'.format(subsampling_parameter))\n if not exists(tree_path):\n makedirs(tree_path)\n\n # List of training files\n self.train_files = np.sort([join(self.train_path, f) for f in listdir(self.train_path) if f[-4:] == '.ply'])\n\n # Add test files\n self.test_files = np.sort([join(self.test_path, f) for f in listdir(self.test_path) if f[-4:] == '.ply'])\n\n if self.debug:\n self.train_files = self.train_files[-101:]\n self.test_files = self.test_files[:10]\n\n files = np.hstack((self.train_files, self.test_files))\n # Initiate containers\n self.input_trees = {'training': [], 'validation': [], 'test': []}\n self.input_colors = {'training': [], 'validation': [], 'test': []}\n 
self.input_vert_inds = {'training': [], 'validation': [], 'test': []}\n self.input_labels = {'training': [], 'validation': []}\n\n # Advanced display\n N = len(files)\n progress_n = 30\n fmt_str = '[{:<' + str(progress_n) + '}] {:5.1f}%'\n print('\\nPreparing KDTree for all scenes, subsampled at {:.3f}'.format(subsampling_parameter))\n\n for i, file_path in enumerate(files):\n\n # get cloud name and split\n cloud_name = file_path.split('/')[-1][:-4]\n cloud_folder = file_path.split('/')[-2]\n if 'train' in cloud_folder:\n if cloud_name in self.validation_clouds:\n self.all_splits += [1]\n cloud_split = 'validation'\n else:\n self.all_splits += [0]\n cloud_split = 'training'\n else:\n cloud_split = 'test'\n\n if (cloud_split != 'test' and self.load_test) or (cloud_split == 'test' and not self.load_test):\n continue\n\n # Name of the input files\n KDTree_file = join(tree_path, '{:s}_KDTree.pkl'.format(cloud_name))\n sub_ply_file = join(tree_path, '{:s}.ply'.format(cloud_name))\n\n # Check if inputs have already been computed\n if isfile(KDTree_file):\n\n # read ply with data\n data = read_ply(sub_ply_file)\n sub_colors = np.vstack((data['red'], data['green'], data['blue'])).T\n sub_vert_inds = data['vert_ind']\n if cloud_split == 'test':\n sub_labels = None\n else:\n sub_labels = data['class']\n\n # Read pkl with search tree\n with open(KDTree_file, 'rb') as f:\n search_tree = pickle.load(f)\n\n else:\n # Read ply file\n data = read_ply(file_path)\n points = np.vstack((data['x'], data['y'], data['z'])).T\n colors = np.vstack((data['red'], data['green'], data['blue'])).T\n if cloud_split == 'test':\n int_features = data['vert_ind']\n else:\n int_features = np.vstack((data['vert_ind'], data['class'])).T\n\n # Subsample cloud\n sub_points, sub_colors, sub_int_features = grid_subsampling(points,\n features=colors,\n labels=int_features,\n sampleDl=subsampling_parameter)\n\n # Rescale float color and squeeze label\n sub_colors = sub_colors / 255\n if cloud_split == 'test':\n sub_vert_inds = np.squeeze(sub_int_features)\n sub_labels = None\n else:\n sub_vert_inds = sub_int_features[:, 0]\n sub_labels = sub_int_features[:, 1]\n\n # Get chosen neighborhoods\n search_tree = KDTree(sub_points, leaf_size=50)\n\n # Save KDTree\n with open(KDTree_file, 'wb') as f:\n pickle.dump(search_tree, f)\n\n # Save ply\n if cloud_split == 'test':\n write_ply(sub_ply_file,\n [sub_points, sub_colors, sub_vert_inds],\n ['x', 'y', 'z', 'red', 'green', 'blue', 'vert_ind'])\n else:\n write_ply(sub_ply_file,\n [sub_points, sub_colors, sub_labels, sub_vert_inds],\n ['x', 'y', 'z', 'red', 'green', 'blue', 'class', 'vert_ind'])\n\n # Fill data containers\n self.input_trees[cloud_split] += [search_tree]\n self.input_colors[cloud_split] += [sub_colors]\n self.input_vert_inds[cloud_split] += [sub_vert_inds]\n if cloud_split in ['training', 'validation']:\n self.input_labels[cloud_split] += [sub_labels]\n\n print('', end='\\r')\n print(fmt_str.format('#' * ((i * progress_n) // N), 100 * i / N), end='', flush=True)\n\n # Get number of clouds\n self.num_training = len(self.input_trees['training'])\n self.num_validation = len(self.input_trees['validation'])\n self.num_test = len(self.input_trees['test'])\n\n # Get validation and test reprojection indices\n self.validation_proj = []\n self.validation_labels = []\n self.test_proj = []\n self.test_labels = []\n i_val = 0\n i_test = 0\n\n # Advanced display\n N = self.num_validation + self.num_test\n print('', end='\\r')\n print(fmt_str.format('#' * progress_n, 100), flush=True)\n 
print('\\nPreparing reprojection indices for validation and test')\n\n for i, file_path in enumerate(files):\n\n # get cloud name and split\n cloud_name = file_path.split('/')[-1][:-4]\n cloud_folder = file_path.split('/')[-2]\n\n # Validation projection and labels\n if (not self.load_test) and 'train' in cloud_folder and cloud_name in self.validation_clouds:\n proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name))\n if isfile(proj_file):\n with open(proj_file, 'rb') as f:\n proj_inds, labels = pickle.load(f)\n else:\n # Get original mesh\n mesh_path = file_path.split('/')\n mesh_path[-2] = 'training_meshes'\n mesh_path = '/'.join(mesh_path)\n vertex_data, faces = read_ply(mesh_path[:-4] + '_mesh.ply', triangular_mesh=True)\n vertices = np.vstack((vertex_data['x'], vertex_data['y'], vertex_data['z'])).T\n labels = vertex_data['class']\n\n # Compute projection inds\n proj_inds = np.squeeze(self.input_trees['validation'][i_val].query(vertices, return_distance=False))\n proj_inds = proj_inds.astype(np.int32)\n\n # Save\n with open(proj_file, 'wb') as f:\n pickle.dump([proj_inds, labels], f)\n\n self.validation_proj += [proj_inds]\n self.validation_labels += [labels]\n i_val += 1\n\n # Test projection\n if self.load_test and 'test' in cloud_folder:\n proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name))\n if isfile(proj_file):\n with open(proj_file, 'rb') as f:\n proj_inds, labels = pickle.load(f)\n else:\n # Get original mesh\n mesh_path = file_path.split('/')\n mesh_path[-2] = 'test_meshes'\n mesh_path = '/'.join(mesh_path)\n vertex_data, faces = read_ply(mesh_path[:-4] + '_mesh.ply', triangular_mesh=True)\n vertices = np.vstack((vertex_data['x'], vertex_data['y'], vertex_data['z'])).T\n labels = np.zeros(vertices.shape[0], dtype=np.int32)\n\n # Compute projection inds\n proj_inds = np.squeeze(self.input_trees['test'][i_test].query(vertices, return_distance=False))\n proj_inds = proj_inds.astype(np.int32)\n\n with open(proj_file, 'wb') as f:\n pickle.dump([proj_inds, labels], f)\n\n self.test_proj += [proj_inds]\n self.test_labels += [labels]\n i_test += 1\n\n print('', end='\\r')\n\n\n print('\\n')\n\n return", "def split_to_train_test(split_ratio, input_data):\n\n data = input_data.drop_duplicates()\n data = data.sample(frac = 1)\n data = np.r_[data]\n rows, columns = data.shape\n a = int(rows*split_ratio)\n train_data = data[0: a]\n test_data = data[a: rows+1]\n\n return train_data, test_data", "def overSample(labels, data):\n tokenData, posData = [], []\n tokenNum = 3 + configuration['embedding']['useB1'] + configuration['embedding']['useB-1']\n if configuration['others']['verbose']:\n sys.stdout.write(reports.doubleSep + reports.tabs + 'Resampling:' + reports.doubleSep)\n sys.stdout.write(reports.tabs + 'data size before sampling = {0}\\n'.format(len(labels)))\n ros = RandomOverSampler(random_state=0)\n data, labels = ros.fit_sample(data, labels)\n for item in data:\n tokenData.append(np.asarray(item[:tokenNum]))\n posData.append(np.asarray(item[tokenNum:]))\n if configuration['others']['verbose']:\n sys.stdout.write(reports.tabs + 'data size after sampling = {0}\\n'.format(len(labels)))\n return np.asarray(labels), [np.asarray(tokenData), np.asarray(posData)]", "def __preprocess(data, sample_size: int = 200000):\n mean = data[:sample_size].mean(axis=0)\n data -= mean\n stdev = data[:sample_size].std(axis=0)\n data /= stdev\n return data", "def subsample_dataframe(X_input, y_input, proportion=0.20):\r\n assert len(X_input) == len(y_input), 'X and y arrays are not 
equal length'\r\n n_samples = len(X_input)\r\n np.random.seed(seed)\r\n random_index = np.random.choice(n_samples, int(n_samples * proportion), replace=False)\r\n X_input_sample, y_input_sample = X_input[random_index], y_input[random_index]\r\n return X_input_sample, y_input_sample", "def train_dev_split(docs, dev_size):\n pass", "def Sampling(self, path, number):\n allfiles = os.listdir(path)\n for image_name in allfiles:\n number_label = image_name.split('.')[0].split('_')[0]\n self.label_file_map[number_label].append(os.path.join(path, image_name))\n \n # 将样本均匀随机抽样切割成训练集合和测试集合\n training_set = collections.defaultdict(list)\n testing_set = collections.defaultdict(list)\n for label in self.label_file_map:\n file_list = self.label_file_map[label]\n training_set[label] = [file_list[random.randint(0,len(file_list)-1)] for i in range(number)] \n testing_set[label] = set(file_list) - set(training_set[label])\n\n train_x, train_y = self._generate_data_label_pair(len(training_set)*number, 68*68, training_set)\n test_total_num = 0\n for elt in testing_set:\n test_total_num += len(testing_set[elt])\n test_x, test_y = self._generate_data_label_pair(test_total_num, 68*68, testing_set)\n return (train_x, train_y, test_x, test_y)", "def sampling(train_set, train_meta, klass, label, n_samples_pos, rate_neg, fold, path_idxs):\n\tprint('-- SAMPLING TRAINNING')\n\tdirectory_idxs = path_idxs+fold+'/'+str(int(klass))+'/'\n\tif(os.path.isdir(directory_idxs)):\n\t\tprint('loading indexes...')\n\t\tidxs_class_pos = np.loadtxt(directory_idxs+'idxs_pos_train.txt', dtype=int)\n\t\tidxs_class_neg = np.loadtxt(directory_idxs+'idxs_neg_train.txt', dtype=int)\n\telse:\n\t\tidxs_class_pos = (train_meta[ : , label] == klass).nonzero()[0]\n\t\tidxs_class_neg = (train_meta[ : , label] != klass).nonzero()[0]\n\t\tif(n_samples_pos < len(idxs_class_pos)):\n\t\t\tidxs_class_pos = np.random.choice(idxs_class_pos, n_samples_pos)\n\t\tidxs_class_neg = np.random.choice(idxs_class_neg, int(n_samples_pos*rate_neg))\n\t\tprint('saving indexes...')\n\t\tos.makedirs(directory_idxs)\n\t\tnp.savetxt(directory_idxs+'idxs_pos_train.txt', idxs_class_pos, fmt='%d')\n\t\tnp.savetxt(directory_idxs+'idxs_neg_train.txt', idxs_class_neg, fmt='%d')\n\n\ttrain_set = np.vstack((train_set[idxs_class_pos], train_set[idxs_class_neg]))\n\ttrain_meta = np.vstack((train_meta[idxs_class_pos], train_meta[idxs_class_neg]))\n\ttrain_meta[:, label] = 1\n\ttrain_meta[len(idxs_class_pos):, label] = -1\n\treturn [train_set, train_meta]", "def random_subsample(neuron, num):\n\n I = np.arange(neuron.n_soma, neuron.n_node)\n np.random.shuffle(I)\n selected_index = I[0:num - 1]\n selected_index = np.union1d([0], selected_index)\n selected_index = selected_index.astype(int)\n selected_index = np.unique(np.sort(selected_index))\n\n return neuron_with_selected_nodes(neuron, selected_index)", "def subsample(df, freq=2):\n df = df.iloc[::freq, :]\n\n return df", "def sampling(data,classes,others=None,portion=0.9,max_size_given=None,rng=np.random.RandomState(100)): \n u, indices = np.unique(classes,return_inverse=True)\n indices=np.asarray(indices)\n num_u=len(u)\n sample_sizes=[]\n \n # get sample size of each class\n for i in range(num_u):\n sample_size_this=np.sum(indices==i)\n sample_sizes.append(sample_size_this)\n sample_sizes=np.array(sample_sizes,dtype=int)\n sample_sizes=sample_sizes*portion\n sample_sizes=np.array(sample_sizes,dtype=int)\n # set a ceiling/limit\n if max_size_given is not None:\n sample_sizes[sample_sizes>max_size_given]=max_size_given \n\n 
indices_all=np.array([],dtype=indices.dtype)\n indices_range=np.array(range(len(indices)))\n\n # sampling\n for i in range(num_u):\n ind_this_num=indices_range[indices==i]\n ind_this_reduced=ind_this_num[rng.choice(len(ind_this_num),size=sample_sizes[i],replace=False)]\n indices_all=np.append(indices_all,ind_this_reduced)\n \n # reduce the data \n data=data[indices_all,:]\n classes=classes[indices_all]\n if np.any(others):\n others=others[indices_all]\n return data,classes,indices_all,others", "def load_data(input_file, test_size, random_seed ):\n ratings = np.genfromtxt(input_file, delimiter=',')\n #ratings = ratings[:200,:]\n train_masks = np.zeros_like(ratings)\n test_masks = np.zeros_like(ratings)\n pairs = list()\n for i in range(ratings.shape[0]):\n for j in range(ratings.shape[1]):\n pairs.append((i,j))\n train, test = train_test_split(pairs ,test_size = test_size, random_state= random_seed)\n for user,item in train:\n train_masks[user, item]= 1\n for user,book in test:\n test_masks[user, item]= 1 \n return ratings, train_masks, test_masks", "def train(self, train_X, train_y):\n if self.feat_sel:\n train_X = self.do_feat_sel(train_X, train_y)\n\n train_X, train_y = self.sample.fit_resample(train_X, train_y)\n self.clf.fit(train_X, train_y)", "def test_full_sample(self):\n sample = next(utils.shuffle_in_chunks(data_length=10, chunk_size=10))\n self.assertCountEqual(sample, list(range(10)))", "def split_data(self, model_data, tuning=True):\n pass", "def samples_keep(self,index):\n\n\t\tif isinstance(index, (int, long)): index = range(self.samples)[-index:]\n\n\t\tself.sampled_topics = np.take(self.sampled_topics,index,axis=0)\n\t\tself.tt = np.take(self.tt,index,axis=2)\n\t\tself.dt = np.take(self.dt,index,axis=2)\n\n\t\tself.samples = len(index)", "def load_and_subsample(raw_img_path, substep, low_freq_percent):\n\n original_img = load_image_data(analyze_img_path=raw_img_path)\n subsampled_img, _ = subsample(\n analyze_img_data=original_img,\n substep=substep,\n low_freq_percent=low_freq_percent)\n\n original_img = np.moveaxis(original_img, -1, 0)\n original_img = np.expand_dims(original_img, -1)\n subsampled_img = np.moveaxis(np.expand_dims(subsampled_img, 3), -2, 0)\n\n num_slices = len(original_img)\n if num_slices > NUM_SAMPLE_SLICES:\n relevant_idx_low = (num_slices - NUM_SAMPLE_SLICES) // 2\n relevant_idx_high = relevant_idx_low + NUM_SAMPLE_SLICES\n relevant_idxs = range(relevant_idx_low, relevant_idx_high)\n\n subsampled_img = subsampled_img[relevant_idxs]\n original_img = original_img[relevant_idxs]\n\n return subsampled_img, original_img", "def __split_dataset(self):\n self.train, self.valid, _, _ = train_test_split(self.data, self.data, test_size=0.2)\n self.valid, self.test, _, _ = train_test_split(self.valid, self.valid, test_size=0.5)", "def subsample(expdat,numreads=10000,inplace=False):\n\timport biom\n\n\tparams=locals()\n\n\tnewexp=hs.filterorigreads(expdat,numreads,inplace)\n\tnewexp=hs.toorigreads(newexp,inplace=True)\n\n\ttable=biom.table.Table(newexp.data,newexp.seqs,newexp.samples)\n\ttable=table.subsample(numreads,axis='observation')\n\ttids=table.ids(axis='sample')\n\tfor idx,cid in enumerate(tids):\n\t\tif not cid==newexp.samples[idx]:\n\t\t\tprint('problem with sample ids!!!!')\n\tnewpos=[]\n\tfor cseq in table.ids(axis='observation'):\n\t\tnewpos.append(newexp.seqdict[cseq])\n\tnewexp=hs.reorderbacteria(newexp,newpos,inplace=True)\n\tnewexp.data=table.matrix_data.todense().A\n\tnewexp=normalizereads(newexp,numreads=10000,inplace=True,fixorig=False)\n\tfor 
cidx in range(len(newexp.samples)):\n\t\tnewexp.origreads[cidx]=numreads\n\tnewexp=updateorigreads(newexp)\n\tnewexp.filters.append(\"subsample to %d\" % numreads)\n\ths.addcommand(newexp,\"subsample\",params=params,replaceparams={'expdat':expdat})\n\treturn newexp", "def generate_samples(self, n_samples):", "def generate_samples(self, n_samples):", "def split_data(df: pd.DataFrame):\n size = int(df.shape[0] * 0.8)\n indexes = np.random.choice(df.index, size, replace=False)\n train_set = df.loc[indexes]\n test_set = df.loc[~df.index.isin(indexes)]\n return train_set, test_set", "def train_test_split(self, test_size=0.1, random_state=0, random_rate_label=True):\r\n data = self._shuffle_set(random_state)\r\n if test_size < 1:\r\n threshold = int(data.shape[0] * test_size)\r\n else:\r\n threshold = test_size\r\n dataset = Set(data[:, :-1], data[:, -1])\r\n if random_rate_label:\r\n split_index = np.random.randint(dataset.labels.shape[0], size=threshold)\r\n else:\r\n split_index = dataset._split_set_index(size=int(threshold / self.targets_count))\r\n X_test = dataset.features[split_index, :]\r\n y_test = dataset.labels[split_index]\r\n X_train = np.delete(dataset.features, split_index, axis=0)\r\n y_train = np.delete(dataset.labels, split_index, axis=0)\r\n return X_train, y_train, X_test, y_test", "def dataset_augmentation(data_start, bootstrapping = 1, epurate = 1, shuffle = True):\n data = data_start\n for ii in range(bootstrapping):\n data = data.append(data_start.apply(bootstrap_sample, axis=1), ignore_index=True)\n\n#Bugged version that weirdly works well....\n# for ii in range(bootstrapping):\n # data = data.append(bootstrap_sample(data_start), ignore_index=True)\n\n for ii in range(epurate):\n data = data.append(data_start.apply(epurate_sample, axis=1), ignore_index=True)\n\n # Shuffling (Important)\n if shuffle == True:\n data = data.sample(frac=1)\n return data", "def train(self, data_source, batch_size=4096):\n self.mean, self.std_dev = stats_batchwise(data_source, batch_size)", "def downsample(self, number):\n self.samples, self.extra_kwargs = _downsample(\n self.samples, number, extra_kwargs=self.extra_kwargs\n )", "def test_subsample_fasta_20(self):\r\n\r\n seed(12210)\r\n\r\n subsample_fasta(self.fasta_filepath, self.output_filepath,\r\n percent_subsample=0.20)\r\n\r\n self._files_to_remove.append(self.output_filepath)\r\n\r\n actual_results =\\\r\n [line.strip() for line in open(self.output_filepath, \"U\")]\r\n\r\n self.assertEqual(actual_results, self.expected_lines_20_perc)", "def _sample_subblocks(threadidx):\n while True:\n try:\n job = jobs.get_nowait()\n except Queue.Empty:\n print \"QUEUE IS EMPTY\"\n return\n print '{0}: processing job {1}'.format(threadidx, job[0])\n\n # sample random superblocks\n features[threadidx] = sample_clips_random(\n video=job[1],\n framesize=cfg.data.superblock_framesize,\n horizon=cfg.data.superblock_horizon,\n temporal_subsampling=cfg.data.temp_subsample,\n nsamples=nsamples_per_file)\n if np.any(np.isnan(features[threadidx])):\n raise ValueError('nan detected in random samples')\n\n # get blocks from superblocks\n features[threadidx] = sample_clips_dense_from_multiple_videos(\n features[threadidx].reshape((features[threadidx].shape[0],\n cfg.data.superblock_horizon,\n cfg.data.superblock_framesize,\n cfg.data.superblock_framesize)),\n framesize=cfg.data.framesize,\n horizon=cfg.data.horizon,\n temporal_subsampling=cfg.data.temp_subsample,\n stride=cfg.data.stride, verbose=False)\n if np.any(np.isnan(features[threadidx])):\n raise 
ValueError('nan detected in sub-samples')\n\n # whiten the samples\n if cfg.pca.method == 'blockwise':\n features[threadidx] = pca.whiten(\n data=features[threadidx], V=V, m0=m0, s0=s0,\n var_fracs=var_fracs, nprincomps=nprinc,use_gpu=False,\n retain_var=cfg.pca.retain_var)\n else:\n features[threadidx] = pca.whiten(\n data=features[threadidx].reshape(\n (features[threadidx].shape[0] * cfg.data.horizon,\n -1)),\n V=V, m0=m0, s0=s0,\n var_fracs=var_fracs, nprincomps=nprinc, use_gpu=False,\n retain_var=cfg.pca.retain_var).reshape(\n (features[threadidx].shape[0], -1))\n\n if np.any(np.isnan(features[threadidx])):\n raise ValueError('nan detected in whitened samples')\n\n # get the mappings\n model_mutex.acquire()\n features[threadidx] = model.mappingsNonoise_batchwise(\n features[threadidx], batchsize=1000)\n model_mutex.release()\n if np.any(np.isnan(features[threadidx])):\n raise ValueError('nan detected in mappings')\n # concatenate the mappings\n features[threadidx] = features[threadidx].reshape((\n nsamples_per_file, -1))\n\n input_mutex.acquire()\n kmeanstraindata[\n job[0] * nsamples_per_file:(job[0] + 1) * nsamples_per_file, :] =\\\n features[threadidx]\n # thread 0 should flush\n if threadidx == 0:\n kmeanstrainfile.flush()\n input_mutex.release()", "def getSubClassifierData(subclasses = [2,3], train_data = None, true_classes = None):\n if (train_data is None) or (true_classes is None):\n train_data, true_classes, _ = get_training_data() \n \n assert len(true_classes) == np.shape(train_data)[0]\n \n validsample = np.array([x in subclasses for x in true_classes])\n return train_data[validsample,:], true_classes[validsample]", "def datasubset(loader, start, count, batch_size):\n # Note: start is the start index of batch, not image\n smaller_dataset = []\n end_idx = count / batch_size\n for batch_idx, (orig_images, labels) in enumerate(loader):\n if start <= batch_idx < end_idx:\n smaller_dataset.append((orig_images, labels))\n if batch_idx > end_idx:\n break\n return smaller_dataset", "def get_15k_row_subset(input_file_name, output_file_name):\n df = pd.read_csv(input_file_name)\n df = df.drop(['Unnamed: 0'], axis=1)\n\n df_sample = df.sample(n=15000, replace=False).copy()\n\n df_sample.to_csv(output_file_name)", "def get_samples(self):\n result = []\n segmentsize=30\n # Reduce this to very little to get very large trainingsets\n stride=5\n noOfBuckets=40\n for start in range(0, len(self.data) - segmentsize, stride):\n if start + segmentsize <= len(self.data):\n segments_buckets = self.get_buckets(start, start + segmentsize, noOfBuckets)\n result.append(segments_buckets)\n return result", "def balanced_subsample(y, s):\n sample = []\n # For every label in the dataset\n for label in np.unique(y):\n # Get the index of all images with a specific label\n images = np.where(y==label)[0]\n # Draw a random sample from the images\n random_sample = np.random.choice(images, size=s, replace=False)\n # Add the random sample to our subsample list\n sample += random_sample.tolist()\n return sample", "def test_train_spl(data, testsize):\n test = data.tail(testsize)\n train = data.head(data.shape[0] - testsize)\n return test, train", "def load_partition(idx: int):\r\n assert idx in range(10)\r\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()\r\n return (\r\n x_train[idx * 5000 : (idx + 1) * 5000],\r\n y_train[idx * 5000 : (idx + 1) * 5000],\r\n ), (\r\n x_test[idx * 1000 : (idx + 1) * 1000],\r\n y_test[idx * 1000 : (idx + 1) * 1000],\r\n )" ]
[ "0.7140998", "0.708751", "0.6995602", "0.68779194", "0.6740651", "0.66297245", "0.6604621", "0.65915847", "0.65746677", "0.6495264", "0.6429238", "0.6381866", "0.6375742", "0.6355722", "0.63405573", "0.6313042", "0.6300593", "0.629987", "0.623293", "0.62231666", "0.62195325", "0.6200534", "0.61952007", "0.61652976", "0.61559", "0.6142769", "0.6142435", "0.6092398", "0.60922784", "0.60916245", "0.60864294", "0.6054797", "0.6047336", "0.60385925", "0.60235614", "0.60077864", "0.60034114", "0.6002524", "0.5992299", "0.59876806", "0.5987478", "0.5983582", "0.59812945", "0.5958369", "0.594405", "0.5918634", "0.59181935", "0.59153306", "0.59127843", "0.59115356", "0.5908339", "0.5907041", "0.5896112", "0.58857584", "0.58742833", "0.5869457", "0.58653104", "0.58592534", "0.583834", "0.58379745", "0.5837718", "0.58348674", "0.5827301", "0.5822001", "0.5821542", "0.58171856", "0.5813596", "0.5797308", "0.5794459", "0.57901794", "0.5781343", "0.57801425", "0.5777697", "0.5770638", "0.5766141", "0.576251", "0.57577896", "0.57556987", "0.5750333", "0.5738029", "0.573426", "0.57324195", "0.57323104", "0.5728793", "0.57243115", "0.57243115", "0.57236254", "0.571315", "0.5711716", "0.5709638", "0.57090586", "0.5706496", "0.56999034", "0.5699073", "0.5697889", "0.5694241", "0.5692694", "0.56888986", "0.56850076", "0.56800866" ]
0.6540765
9
Dataloader type for this task
def dataloader(self): return DataLoader
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def get_dataloader(self):\n shuffle = True if self.mode == \"train\" else False\n return DataLoader(self.get_dataset(), batch_size=self.batch_size, shuffle = shuffle, \n collate_fn=create_mini_batch)", "def get_dataloader(self, cid, batch_size=None, type=\"train\"):\n dataset = self.get_dataset(cid, type)\n batch_size = len(dataset) if batch_size is None else batch_size\n data_loader = DataLoader(dataset, batch_size=batch_size)\n return data_loader", "def train_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_train, **self.dl_kwargs)", "def get_data_loader(\n name: str, **kwargs) -> data_loader.DataLoader:\n\n if name == \"synthetic_images\":\n logging.info(\"Creating synthetic image data loader.\")\n return synthetic_image.SyntheticImageDataLoader(**kwargs)\n elif name == \"synthetic_bert\":\n logging.info(\"Creating synthetic bert data loader.\")\n return synthetic_bert.SyntheticBertLoader(**kwargs)\n elif name == \"squad_bert\":\n logging.info(\"Creating SQuAD 1.1 bert data loader.\")\n return squad_bert.SquadBertLoader(**kwargs)\n elif name == \"sentiment_bert\":\n logging.info(\"Creating IMDB sentiment analysis data loader.\")\n return generic_jsonl.GenericJsonlLoader(**kwargs)\n elif name == \"criteo\":\n logging.info(\"Creating Criteo data loader.\")\n return criteo.CriteoLoader(**kwargs)\n elif name == \"generic_jsonl\":\n logging.info(\"Creating generic jsonl file data loader.\")\n return generic_jsonl.GenericJsonlLoader(**kwargs)\n else:\n raise ValueError(\"Unsupported data loader type.\")", "def train_dataloader(self) -> DataLoader:\n return self._custom_data_loader()", "def val_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_valid, **self.dl_kwargs)", "def test_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_test, **self.dl_kwargs)", "def task_type(self):\n pass", "def test_dataloader(self) -> DataLoader:\n return self._custom_data_loader()", "def get_train_dataloader(self):\n if self.train_dataloader is not None:\n return self.train_dataloader\n\n assert self.schema is not None, \"schema is required to generate Train Dataloader\"\n return T4RecDataLoader.parse(self.args.data_loader_engine).from_schema(\n self.schema,\n self.train_dataset_or_path,\n self.args.per_device_train_batch_size,\n max_sequence_length=self.args.max_sequence_length,\n drop_last=self.args.dataloader_drop_last,\n shuffle=True,\n shuffle_buffer_size=self.args.shuffle_buffer_size,\n )", "def __get_dataset_type(dataset):\n op_type = None\n if isinstance(dataset, 
de.ShuffleDataset):\n op_type = OpName.SHUFFLE\n elif isinstance(dataset, de.MindDataset):\n op_type = OpName.MINDRECORD\n elif isinstance(dataset, de.BatchDataset):\n op_type = OpName.BATCH\n elif isinstance(dataset, de.SyncWaitDataset):\n op_type = OpName.BARRIER\n elif isinstance(dataset, de.ZipDataset):\n op_type = OpName.ZIP\n elif isinstance(dataset, de.ConcatDataset):\n op_type = OpName.CONCAT\n elif isinstance(dataset, de.MapDataset):\n op_type = OpName.MAP\n elif isinstance(dataset, de.FilterDataset):\n op_type = OpName.FILTER\n elif isinstance(dataset, de.RepeatDataset):\n op_type = OpName.REPEAT\n elif isinstance(dataset, de.SkipDataset):\n op_type = OpName.SKIP\n elif isinstance(dataset, de.TakeDataset):\n op_type = OpName.TAKE\n elif isinstance(dataset, de.ImageFolderDatasetV2):\n op_type = OpName.IMAGEFOLDER\n elif isinstance(dataset, de.GeneratorDataset):\n op_type = OpName.GENERATOR\n elif isinstance(dataset, de.TransferDataset):\n op_type = OpName.DEVICEQUEUE\n elif isinstance(dataset, de.RenameDataset):\n op_type = OpName.RENAME\n elif isinstance(dataset, de.TFRecordDataset):\n op_type = OpName.TFREADER\n elif isinstance(dataset, de.ProjectDataset):\n op_type = OpName.PROJECT\n elif isinstance(dataset, de.MnistDataset):\n op_type = OpName.MNIST\n elif isinstance(dataset, de.ManifestDataset):\n op_type = OpName.MANIFEST\n elif isinstance(dataset, de.VOCDataset):\n op_type = OpName.VOC\n elif isinstance(dataset, de.Cifar10Dataset):\n op_type = OpName.CIFAR10\n elif isinstance(dataset, de.Cifar100Dataset):\n op_type = OpName.CIFAR100\n elif isinstance(dataset, de.CelebADataset):\n op_type = OpName.CELEBA\n elif isinstance(dataset, de.RandomDataset):\n op_type = OpName.RANDOMDATA\n elif isinstance(dataset, de.TextFileDataset):\n op_type = OpName.TEXTFILE\n else:\n raise ValueError(\"Unsupported DatasetOp\")\n\n return op_type", "def get_dataloader(params, format_name='hdf5', **kwargs):\n \n Provider = get_proper_provider(format_name)(params.modality)\n \n return DataLoader(Provider(params.dataset_path,\n seq_length=params.seq_length),\n batch_size=params.batch_size,\n shuffle=params.is_training,\n num_workers=params.num_workers,\n pin_memory=params.cuda,\n collate_fn=pad_collate)", "def getDataSetType(self):\n return self.__data_set_type__", "def _get_dataloader(samples, batch_size):\n print(\"Cogiendo dataloader\")\n return DataLoader(samples, shuffle=True, batch_size=batch_size)", "def _custom_data_loader(self) -> DataLoader:\n dataloaders = DataLoader(self.dataset, batch_size=1)\n return dataloaders", "def data_loader(self, url, type_of):\n\n data_loader = None\n if type_of == \"csv\":\n data_loader = self.csv\n elif type_of == \"json\":\n data_loader = self.json\n elif type_of == \"parquet\":\n data_loader = self.parquet\n elif type_of == \"avro\":\n data_loader = self.avro\n else:\n RaiseIt.type_error(data_loader, [\"csv\", \"json\", \"parquet\", \"avro\", ])\n\n i = url.rfind('/')\n data_name = url[(i + 1):]\n data_def = {\n \"displayName\": data_name,\n \"url\": url\n }\n return Downloader(data_def).download(data_loader, type_of)", "def get_dataloader(hp: HParams) \\\n -> Tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader, int]:\n if hp.data.dataset == \"podcast\":\n dataset = podcast.PODCAST(root=hp.data.path,\n audio_folder=hp.data.audio_folder,\n text_file=hp.data.text_file)\n length = len(dataset)\n train_length = int(0.9 * length)\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_length,\n length - train_length])\n\n # 
https://towardsdatascience.com/7-tips-for-squeezing-maximum-performance-from-pytorch-ca4a40951259\n train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n test_dataloader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n return train_dataloader, test_dataloader, int(0.9 * length)\n\n elif hp.data.dataset == \"librispeech\":\n Path(hp.data.path).mkdir(parents=True, exist_ok=True)\n dataset = librispeech.download_data(root=hp.data.path, url=hp.data.url)\n length = len(dataset)\n train_length = int(0.9 * length)\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_length,\n length - train_length])\n train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n test_dataloader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n return train_dataloader, test_dataloader, int(0.9 * length)\n\n elif hp.data.dataset == \"ljspeech\":\n Path(hp.data.path).mkdir(parents=True, exist_ok=True)\n dataset = ljspeech.download_data(root=hp.data.path)\n length = len(dataset)\n train_length = int(0.9 * length)\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_length,\n length - train_length])\n train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n test_dataloader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n\n return train_dataloader, test_dataloader, int(0.9 * length)\n\n else:\n raise Exception(f\"Dataset {hp.data.dataset} does not exist\")", "def get_data_loaders(opt):\n return find_dataloader_using_name(opt.dataloader)(opt).load_data()", "def task_type(cls):\r\n raise NotImplementedError()", "def get_test_dataset_DataLoader(self):\n test_info = self.get_test_DataLoader_info()\n name = test_info[\"name\"]\n task = test_info[\"task\"]\n data_dir = test_info[\"data_dir\"]\n hdf5_file = test_info[\"hdf5_file\"]\n\n data_loader = DataLoader(name, task, data_dir, hdf5_file)\n\n return data_loader, self.dataset, self.data_fields", "def get_loader(config):\n train_transform = [T.Resize((256, 128)), T.RandomHorizontalFlip(), T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]\n train_transform = T.Compose(train_transform)\n\n test_transform = [T.Resize((256, 128)), T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]\n test_transform = T.Compose(test_transform)\n\n # Datasets.\n if config.source_dataset in ['duke'] and config.target_dataset in ['market']:\n source_image_dir = config.duke_image_dir\n target_image_dir = config.market_image_dir\n elif config.source_dataset in ['market'] and config.target_dataset in ['duke']:\n 
source_image_dir = config.market_image_dir\n target_image_dir = config.duke_image_dir\n else:\n assert 'Dataset not support!'\n source_set = ReidDataset(source_image_dir, train_transform)\n target_set = ReidDataset(target_image_dir, train_transform, config.expanding_cam)\n test_set = ReidDataset(source_image_dir, test_transform)\n\n # Dataloader.\n source_loader = data.DataLoader(dataset=source_set, batch_size=config.batch_size,\n num_workers=config.num_workers, shuffle=True, pin_memory=True, drop_last=True)\n\n target_loader = data.DataLoader(dataset=target_set, batch_size=config.batch_size,\n num_workers=config.num_workers, shuffle=True, pin_memory=True, drop_last=True)\n\n test_loader = data.DataLoader(dataset=test_set, batch_size=config.batch_size, num_workers=config.num_workers,\n shuffle=False, pin_memory=True, drop_last=False)\n\n return {'source_loader': source_loader, 'target_loader': target_loader, 'test_loader': test_loader}", "def type(self) -> 'Data_Type':\n return Data_Type(self._info.htype, self._info.ptype)", "def data_type():\n return DataTypeUtil.getDTypeForName(DataTypeUtil.getDtypeFromContext())", "def create_train_dataloader(configs):\n train_lidar_aug = OneOf([\n Random_Rotation(limit_angle=np.pi / 4, p=1.0),\n Random_Scaling(scaling_range=(0.95, 1.05), p=1.0),\n ], p=0.66)\n train_dataset = KittiDataset(configs, mode='train', lidar_aug=train_lidar_aug, hflip_prob=configs.hflip_prob,\n num_samples=configs.num_samples)\n train_sampler = None\n if configs.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, batch_size=configs.batch_size, shuffle=(train_sampler is None),\n pin_memory=configs.pin_memory, num_workers=configs.num_workers, sampler=train_sampler)\n\n return train_dataloader, train_sampler", "def get_loader(file_path, src_word2id, trg_word2id, intent_type=None, intent2index=None, batch_size=1):\n dials = json.load(open(file_path))\n dataset_list = []\n for name in dials.keys():\n val_file = dials[name]\n # build a custom dataset\n dataset = MultiwozSingleDataset(val_file, name, src_word2id, trg_word2id, intent_type, intent2index)\n dataset_list.append(dataset)\n datasets = ConcatDataset(dataset_list)\n # data loader for custome dataset\n data_loader = DataLoader(dataset=datasets,\n batch_size=batch_size,\n shuffle=True,\n num_workers=0,\n collate_fn=collate_fn)\n return data_loader", "async def handle_ledertype(self):\n rows = self._load_csv_if_newer(Ledertype)\n return await self._create_classes_from_csv(Ledertype, rows)", "def task_type(cls):\n raise NotImplementedError()", "def create_loader(dataset: Dataset, cfg: trainer_configs.BaseDatasetConfig, batch_size: int, *,\r\n collate_fn: Optional[Callable[[List[Any]], Any]] = None) -> DataLoader:\r\n # return DataLoader(\r\n # dataset, batch_size=batch_size, num_workers=cfg.num_workers,\r\n # drop_last=cfg.drop_last, collate_fn=collate_fn) # type: ignore\r\n return DataLoader(\r\n dataset, batch_size=batch_size, shuffle=cfg.shuffle, num_workers=cfg.num_workers,\r\n drop_last=cfg.drop_last, collate_fn=collate_fn) # type: ignore\r", "def data_type(self) -> int:\n return self.data[\"args\"][\"dataType\"]", "def preload_data_type(self) -> pulumi.Input['FhirDatastorePreloadDataConfigPreloadDataType']:\n return pulumi.get(self, \"preload_data_type\")", "def test_dataloader(self) -> data.DataLoader:\n return data.DataLoader(dataset=self.datasets['test'], batch_size=self.batch_size,\n num_workers=self.num_workers, shuffle=False, 
pin_memory=False)", "def build_dataloader(bs, shfle):\n # change get_labels to correct version (classification vs regression)\n dataset = TensorDataset(rand_data(), get_labels())\n dataset = TensorDataset(rand_data(), get_regression_labels())\n\n return DataLoader(dataset, batch_size=bs, shuffle=shfle, num_workers=0)", "def _create_data_loader(self, data, **kwargs):\n if data is None:\n return None\n\n # Set DataLoader config\n # NOTE: Not applicable if data is already a DataLoader\n config = {\n **self.config[\"train_config\"][\"data_loader_config\"],\n **kwargs,\n \"pin_memory\": self.config[\"device\"] != \"cpu\",\n }\n # Return data as DataLoader\n if isinstance(data, DataLoader):\n return data\n elif isinstance(data, Dataset):\n return DataLoader(data, **config)\n elif isinstance(data, (tuple, list)):\n return DataLoader(self._create_dataset(*data), **config)\n else:\n raise ValueError(\"Input data type not recognized.\")", "def create_dataloader(data):\r\n input_ids = torch.LongTensor([sent['input_ids'] for sent in data])\r\n input_mask = torch.LongTensor([sent['input_mask'] for sent in data])\r\n segment_ids = torch.LongTensor([sent['segment_ids'] for sent in data])\r\n label_ids = torch.LongTensor([sent['label_ids'] for sent in data])\r\n\r\n dataset = TensorDataset(input_ids, input_mask, segment_ids, label_ids)\r\n\r\n train_sampler = RandomSampler(dataset)\r\n dataloader = DataLoader(dataset, sampler=train_sampler, batch_size=BATCH_SIZE)\r\n\r\n return dataloader", "def data_loader(self, url, type_of):\n i = url.rfind('/')\n data_name = url[(i + 1):]\n data_def = {\n \"displayName\": data_name,\n \"url\": url\n }\n if type_of == \"csv\":\n data_loader = self.csv_data_loader\n else:\n data_loader = self.json_data_loader\n\n return Downloader(data_def).download(data_loader)", "def fetch_dataloader(types, dataset_dir, params):\n\n dataloaders = {}\n samplers = {}\n\n for split in ['train', 'val', 'test']:\n if split in types:\n path = os.path.join(dataset_dir, \"{}\".format(split))\n\n # Use the train_transformer if training data, else use eval_transformer without random flip\n # take care of 'pin_memory' and 'num_workers'\n if split == 'train':\n train_set = BaseDataset(path, train_transformer)\n sampler = None\n if params.distributed:\n sampler = torch.utils.data.distributed.DistributedSampler(\n train_set)\n dataloader = DataLoader(\n train_set,\n batch_size=params.batch_size_pre_gpu,\n shuffle=(sampler is None),\n num_workers=params.num_workers,\n pin_memory=params.cuda,\n sampler=sampler)\n\n else:\n val_set = BaseDataset(path, eval_transformer)\n sampler = None\n if params.distributed:\n sampler = torch.utils.data.distributed.DistributedSampler(\n val_set)\n dataloader = DataLoader(\n val_set,\n batch_size=params.batch_size_pre_gpu,\n shuffle=False,\n pin_memory=params.cuda,\n num_workers=params.num_workers,\n sampler=sampler)\n\n dataloaders[split] = dataloader\n samplers[split] = sampler\n\n return dataloaders, samplers", "def to_DataLoader(self, **kwargs):\r\n return DataLoader(self, **kwargs)", "def generate_dataloader(\n design_matrix: Union[np.ndarray, torch.Tensor],\n targets_array: Union[np.array, torch.Tensor],\n data_type: torch.dtype = torch.float32,\n batch_size: int = 256,\n shuffle: bool = False,\n num_workers: int = 0,\n ):\n dataloader_wrapper_args = {\n \"batch_size\": batch_size,\n \"shuffle\": shuffle,\n \"num_workers\": num_workers,\n }\n assert type(design_matrix) in (torch.Tensor, np.ndarray)\n assert type(targets_array) in (torch.Tensor, np.ndarray, 
np.array)\n\n if type(design_matrix) is np.ndarray:\n design_matrix_as_torch_tensor = torch.from_numpy(design_matrix).type(data_type)\n else:\n design_matrix_as_torch_tensor = design_matrix.type(data_type)\n\n if type(targets_array) in (np.array, np.ndarray):\n targets_array_as_torch_tensor = torch.from_numpy(targets_array).type(data_type)\n else:\n targets_array_as_torch_tensor = targets_array.type(data_type)\n dataset = torch.utils.data.TensorDataset(design_matrix_as_torch_tensor, targets_array_as_torch_tensor)\n dataloader = torch.utils.data.DataLoader(dataset, **dataloader_wrapper_args)\n return dataloader", "def get_data_class(self):\n return self.data_class", "def build_dataloader(cfg, augmentor=None, mode='train', dataset=None, rank=None,\n dataset_class=VolumeDataset, dataset_options={}, cf=collate_fn_train):\n assert mode in ['train', 'val', 'test']\n print('Mode: ', mode)\n\n if mode == 'train':\n batch_size = cfg.SOLVER.SAMPLES_PER_BATCH\n elif mode == 'val':\n batch_size = cfg.SOLVER.SAMPLES_PER_BATCH * 4\n else:\n cf = collate_fn_test # update the collate function\n batch_size = cfg.INFERENCE.SAMPLES_PER_BATCH * cfg.SYSTEM.NUM_GPUS\n\n if dataset is None: # no pre-defined dataset instance\n if cfg.MODEL.TARGET_OPT_MULTISEG_SPLIT is not None:\n dataset_class = VolumeDatasetMultiSeg\n dataset = get_dataset(cfg, augmentor, mode, rank, dataset_class, dataset_options)\n\n sampler = None\n num_workers = cfg.SYSTEM.NUM_CPUS\n if cfg.SYSTEM.DISTRIBUTED:\n num_workers = cfg.SYSTEM.NUM_CPUS // cfg.SYSTEM.NUM_GPUS\n if cfg.DATASET.DISTRIBUTED == False:\n sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n\n # In PyTorch, each worker will create a copy of the Dataset, so if the data\n # is preload the data, the memory usage should increase a lot.\n # https://discuss.pytorch.org/t/define-iterator-on-dataloader-is-very-slow/52238/2\n img_loader = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size, shuffle=False, collate_fn=cf,\n sampler=sampler, num_workers=num_workers, pin_memory=True)\n\n return img_loader", "def find_dataloader_using_name(dataloader_name):\n\n dataloader_filename = \"data.\" + dataloader_name + \"_dataloader\"\n dataloaderlib = importlib.import_module(dataloader_filename)\n\n dataloader = None\n data_loader_name = dataloader_name\n target_dataloader_name = data_loader_name.replace('_', '') + 'dataloader'\n for name, cls in dataloaderlib.__dict__.items():\n print(\"Comparing {0} with {1}\".format(name.lower(), target_dataloader_name.lower()))\n if name.lower() == target_dataloader_name.lower():\n dataloader = cls\n\n if dataloader is None:\n raise NotImplementedError(\"In %s.py, there should be a class named %s in lowercase.\" % (dataloader_filename,\n target_dataloader_name))\n\n return dataloader", "def get_loader(data, json, batch_size, shuffle, num_workers):\n dataset = FinNumDataset(data, json)\n\n data_loader = torch.utils.data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate_fn)\n return data_loader", "def LoadType(self):\n\t\treturn self._get_attribute('loadType')", "def get_dataloader(dataset, datadir, train_bs, test_bs, dataidxs=None, noise_level=0, net_id=None, total=0):\n if dataset in ('mnist', 'femnist', 'fmnist', 'cifar10','cifar100', 'svhn', 'generated', 'covtype', 'a9a', 'rcv1', 'SUSY','tinyimagenet'):\n if dataset == 'mnist':\n dl_obj = MNIST_truncated\n\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, 
net_id, total)])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n\n elif dataset == 'femnist':\n dl_obj = FEMNIST\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n\n elif dataset == 'fmnist':\n dl_obj = FashionMNIST_truncated\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n\n elif dataset == 'svhn':\n dl_obj = SVHN_custom\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n\n\n elif dataset == 'cifar10':\n dl_obj = CIFAR10_truncated\n\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n transforms.Lambda(lambda x: F.pad(\n Variable(x.unsqueeze(0), requires_grad=False),\n (4, 4, 4, 4), mode='reflect').data.squeeze()),\n transforms.ToPILImage(),\n transforms.RandomCrop(32),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)\n ])\n # data prep for test set\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n \n elif dataset == 'cifar100':\n dl_obj = CIFAR100_truncated\n\n normalize = transforms.Normalize(mean=[0.5070751592371323, 0.48654887331495095, 0.4409178433670343],\n std=[0.2673342858792401, 0.2564384629170883, 0.27615047132568404])\n # transform_train = transforms.Compose([\n # transforms.RandomCrop(32),\n # transforms.RandomHorizontalFlip(),\n # transforms.ToTensor(),\n # normalize\n # ])\n transform_train = transforms.Compose([\n # transforms.ToPILImage(),\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(15),\n transforms.ToTensor(),\n normalize\n ])\n # data prep for test set\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n normalize])\n\n elif dataset == 'tinyimagenet': \n # random_ids = np.random.randint(1000, size=datasize)\n # train_indices = random_ids\n\n imagenet_mean = [0.485, 0.456, 0.406]\n imagenet_std = [0.229, 0.224, 0.225]\n\n train_dl = torch.utils.data.DataLoader(\n torchvision.datasets.ImageFolder(datadir +\"/train\",\n transform=transforms.Compose([\n transforms.Resize(32), \n transforms.ToTensor(),\n # Phuong 09/26 change (mean, std) -> same as pretrained imagenet\n transforms.Normalize(mean=imagenet_mean, std=imagenet_std)])),\n #Phuong 09/26 drop_last=False -> True\n batch_size=train_bs, drop_last=True)\n \n test_dl = torch.utils.data.DataLoader(\n torchvision.datasets.ImageFolder(datadir +\"/test\",\n transform=transforms.Compose([\n transforms.Resize(32), \n transforms.ToTensor(),\n # Phuong 09/26 change (mean, std) -> same as pretrained imagenet\n transforms.Normalize(mean=imagenet_mean, std=imagenet_std)])),\n #Phuong 09/26 drop_last=False -> True\n batch_size=test_bs, drop_last=True)\n\n return train_dl, test_dl, None, None\n\n\n else:\n dl_obj = Generated\n transform_train = None\n transform_test = None\n\n\n train_ds = dl_obj(datadir, dataidxs=dataidxs, train=True, transform=transform_train, download=True)\n test_ds 
= dl_obj(datadir, train=False, transform=transform_test, download=True)\n\n train_dl = data.DataLoader(dataset=train_ds, batch_size=train_bs, shuffle=True, drop_last= dataset in ['cifar100'])\n test_dl = data.DataLoader(dataset=test_ds, batch_size=test_bs, shuffle=False, drop_last=False)\n\n return train_dl, test_dl, train_ds, test_ds", "def type(self):\n # easy enough\n return self._dataset._pyre_id.type", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n query_type = kwargs['query_type']\n \n self._results_ = None\n \n if cmp(FDH._QTYPE_BANNER_, query_type) == 0:\n self._query_name_ = 'report_banner_metrics'\n elif cmp(FDH._QTYPE_LP_, query_type) == 0:\n self._query_name_ = 'report_LP_metrics'\n elif cmp(FDH._QTYPE_BANNER_LP_, query_type) == 0:\n self._query_name_ = 'report_bannerLP_metrics'\n elif cmp(FDH._QTYPE_DONATIONS_, query_type) == 0:\n self._query_name_ = 'report_donation_metrics'\n elif cmp(FDH._QTYPE_TOTAL_, query_type) == 0:\n self._query_name_ = 'report_total_metrics'\n elif cmp(FDH._QTYPE_TOTAL_DONATIONS_, query_type) == 0:\n self._query_name_ = 'report_total_donations'", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n \n self._query_names_[FDH._QTYPE_BANNER_] = 'report_banner_metrics_minutely'\n self._query_names_[FDH._QTYPE_LP_] = 'report_LP_metrics_minutely'\n self._query_names_[FDH._QTYPE_BANNER_LP_] = 'report_bannerLP_metrics_minutely'\n self._query_names_['campaign'] = 'report_campaign_metrics_minutely'\n self._query_names_['campaign_total'] = 'report_campaign_metrics_minutely_total'\n \n self._query_names_[FDH._QTYPE_BANNER_ + FDH._QTYPE_TIME_] = 'report_banner_metrics_minutely_all'\n self._query_names_[FDH._QTYPE_LP_ + FDH._QTYPE_TIME_] = 'report_lp_metrics_minutely_all'\n self._query_names_[FDH._QTYPE_CAMPAIGN_ + FDH._QTYPE_TIME_] = 'report_campaign_metrics_minutely_all'\n \n self._query_type_ = kwargs['query_type']\n \n \"\"\" hardcode the data handler for now \"\"\"\n self._data_handler_ = FDH\n \n self._summary_data_ = None", "def create_dataloader(datafile, dataset_type, batch_size, mechanism, shuffle=False):\n dataset = MazeDataset(datafile, dataset_type)\n assert dataset.num_actions == mechanism.num_actions\n return torch.utils.data.DataLoader(\n dataset, batch_size=batch_size, shuffle=shuffle, num_workers=0)", "def type(self):\n return _python_type_map[self.arrow_dtype.id]", "def dataset(options):\n pass", "def prepare_dataloader(opt, dataobj):\n\n def load_data(name):\n with open(name, 'rb') as f:\n data = pickle.load(f)\n num_types = 1 # There is no event type prediction, hence using a dummy value, this will basically be a constant value field\n return data, num_types\n\n print('[Info] Loading train data...')\n train_data, num_types = load_data(opt.data + 'train_ny.pkl')\n print('[Info] Loading dev data...')\n val_data, _ = load_data(opt.data + 'val_ny.pkl')\n print('[Info] Loading test data...')\n test_data, _ = load_data(opt.data + 'test_ny.pkl')\n\n trainloader = get_dataloader(train_data, opt.batch_size, shuffle=True)\n validationloader = get_dataloader(val_data, opt.batch_size, shuffle=True)\n testloader = get_dataloader(test_data, opt.batch_size, shuffle=False)\n return trainloader, validationloader, testloader, num_types", "def required_data_type(self):\n return Data", "def __init__(self,manager,name):\n Online.DatapointLoader.DatapointLoader.__init__(self,manager,name)\n self.dpName = self.name\n self.activityName = self.dp('Name')\n self.farmInfrastructure = self.dp('Farm.Infrastructure')\n 
self.farmWorker = self.dp('Farm.Worker')\n self.ctrlInfrastructure = self.dp('Control.Infrastructure')\n self.inputInfrastructure = self.dp('Storage.streamInfrastructure')\n self.inputTypes = self.dp('Storage.streamTypes')\n self.outputInfrastructure = self.dp('Storage.recvInfrastructure')\n self.outputTypes = self.dp('Storage.recvTypes')\n self.addDp(self.reader)", "def GetType(vDataSet):\r\n return imaris_types[str(vDataSet.GetType())]", "def train_dataloader(self, batch_size: Optional[int] = None) -> DataLoader:\n dataset = TabularDataset(\n task=self.config.task,\n data=self.train,\n categorical_cols=self.config.categorical_cols,\n continuous_cols=self.config.continuous_cols,\n embed_categorical=(not self.do_leave_one_out_encoder()),\n target=self.target,\n )\n return DataLoader(\n dataset,\n batch_size if batch_size is not None else self.batch_size,\n shuffle=True if self.train_sampler is None else False,\n num_workers=self.config.num_workers,\n sampler=self.train_sampler,\n pin_memory=self.config.pin_memory,\n )", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)", "def make_standard_loader(self, dataset):\n return torch.utils.data.DataLoader(\n dataset,\n batch_size=self.batch_size,\n shuffle=False,\n drop_last=False,\n pin_memory=not (cfg.DEBUG > 0),\n num_workers=self.num_workers,\n )", "def train_dataloader(self) -> DataLoader:\n return DataLoader(\n self.train_dataset,\n batch_size=self.hparams.batch_size,\n shuffle=True,\n num_workers=multiprocessing.cpu_count(),\n )", "def type(self):\n return self.data.type", "def fetch_dataloader(types, data_dir, params):\n dataloaders = {}\n\n for split in ['train', 'val', 'test']:\n if split in types:\n path = os.path.join(data_dir, \"{}_signs\".format(split))\n\n # use the train_transformer if training data, else use eval_transformer without random flip\n if split == 'train':\n dl = DataLoader(SIGNSDataset(path, train_transformer), batch_size=params.batch_size, shuffle=True,\n num_workers=params.num_workers,\n pin_memory=params.cuda)\n else:\n dl = DataLoader(SIGNSDataset(path, eval_transformer), batch_size=params.batch_size, shuffle=False,\n num_workers=params.num_workers,\n pin_memory=params.cuda)\n\n dataloaders[split] = dl\n\n return dataloaders", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n \n \"\"\" Use _query_names_ to store a single query name \"\"\"\n self._query_names_ = 'report_donor_dollar_breakdown' # embed the query name in the class itself\n self._query_type_ = kwargs['query_type']", "def get_loader(dataset_name, **kwargs):\n if dataset_name == \"rostd\":\n loader = partial_class(ROSTDLoader,\n data_root_dir=\"data/rostd\",\n use_coarse_labels=False)\n elif dataset_name == \"rostd_coarse\":\n loader = partial_class(ROSTDLoader,\n data_root_dir=\"data/rostd\",\n use_coarse_labels=True)\n elif dataset_name == \"snips_75\":\n loader = partial_class(SNIPSLoader,\n data_root_dir=\"data/snips\",\n K=75, version=kwargs['version'])\n elif dataset_name == \"snips_25\":\n loader = partial_class(SNIPSLoader,\n data_root_dir=\"data/snips\",\n K=25, version=kwargs['version'])\n elif dataset_name == \"clinc\":\n loader = partial_class(CLINC150Loader,\n data_path=\"data/clinc/data_full.json\",\n unsupervised=True)\n elif dataset_name == \"clinc_sup\":\n loader = partial_class(CLINC150Loader,\n data_path=\"data/clinc/data_full.json\",\n unsupervised=False)\n elif dataset_name == 'sst':\n loader = partial_class(SSTLoader,\n data_root_dir=\"data/sst\",\n ood_type=kwargs['ood_type'])\n else:\n 
raise RuntimeError(f\"Bad dataset: {dataset_name}\")\n return loader", "def __init__(self,manager,name):\n Online.DatapointLoader.DatapointLoader.__init__(self,manager,name)\n self.dpName = self.name\n self.runType = self.dp('general.runType')\n self.partitionName = self.dp('general.partName')\n self.partitionID = self.dp('general.activePartId')\n self.nSubFarm = self.dp('HLTFarm.nSubFarms')\n self.subfarms = self.dp('HLTFarm.subFarms')\n self.addDp(self.reader)", "def get_dataset(opts):\n dataset_type = opts.dataset_params.dataset_type\n if dataset_type in 'synth':\n return synthgraph.SynthGraphDataset(opts, opts.dataset_params)\n elif dataset_type in 'synthnoise':\n return synthgraph.SynthNoiseGraphDataset(opts, opts.dataset_params)\n elif dataset_type in 'synthoutlier':\n return synthgraph.SynthOutlierGraphDataset(opts, opts.dataset_params)\n elif dataset_type in 'rome16kgeom':\n return spreal.GeomKNNRome16KDataset(opts, opts.dataset_params)\n elif dataset_type in 'graffiti':\n return graffiti.GraffitiDataset(opts, opts.dataset_params)\n else:\n print(\"ERROR: Dataset type {} not implemented yet\".format(dataset_type))\n sys.exit(1)", "def _spark_type_class(self) -> Type[DataType]:", "def _spark_type_class(self) -> Type[DataType]:", "def _spark_type_class(self) -> Type[DataType]:", "def _spark_type_class(self) -> Type[DataType]:", "def _spark_type_class(self) -> Type[DataType]:", "def test_dataloader(self) -> DataLoader:\n return DataLoader(\n self.test_dataset,\n batch_size=self.hparams.batch_size,\n shuffle=False,\n num_workers=multiprocessing.cpu_count(),\n )", "def __init__(self,manager,name):\n Online.DatapointLoader.DatapointLoader.__init__(self,manager,name)\n self.dpName = self.name\n self.inuse = self.dp('InUse')\n self.slice = self.dp('FSMSlice')\n self.info = self.dp('RunInfo')\n self.addDp(self.reader)", "def get_loader(path, batch_size, device, directory_name):\n if str(device) == \"cpu\":\n path = \"/home/amit/PycharmProjects/ML_Project_1/nsynth-test\"\n # audio samples are loaded as an int16 numpy array\n # rescale intensity range as float [-1, 1]\n toSelectCols = transforms.Lambda(lambda x: x[0:16000])\n toFloat = transforms.Lambda(lambda x: x / np.iinfo(np.int16).max + 1)\n # use instrument_family and instrument_source as classification targets\n dataset = NSynth(\n path,\n transform=transforms.Compose([toSelectCols, toFloat]),\n blacklist_pattern=[\"synth_lead\"], # blacklist string instrument\n categorical_field_list=[\"instrument_family\", \"instrument_source\"])\n\n print(path, \"Length: \", len(dataset))\n plot_waveforms(dataset[0][0], \"1-D_audio_waveform.png\", \"1-D audio waveform\", directory_name)\n return dataset, torch_data.DataLoader(dataset, batch_size=batch_size, shuffle=True)", "def create_dataset(opt):\n\tdata_loader = CustomDatasetDataLoader(opt)\n\tdataset = data_loader.load_data()\n\treturn dataset", "def datasource_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"datasource_type\")", "def datasource_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"datasource_type\")", "def datasource_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"datasource_type\")", "def datasource_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"datasource_type\")", "def _source_type(self):\n pass", "def data_type_id(self) -> str:\n return self._data_type_id", "def __init__(self, data_type=None):\n self.type = data_type", "def get_dataloader(self, hdf_path, data_description=None):\r\n if 
data_description is None:\r\n data_description = \"data\"\r\n print(f\"* Loading preprocessed {data_description}.\", flush=True)\r\n\r\n dataset = HDFDataset(hdf_path)\r\n dataloader = BlockDataLoader(dataset=dataset,\r\n batch_size=self.C.batch_size,\r\n block_size=self.C.block_size,\r\n shuffle=True,\r\n n_workers=self.C.n_workers,\r\n pin_memory=True)\r\n self.print_time_elapsed()\r\n\r\n return dataloader", "def _dataset_name(self):\n return f'Libri{self.task}Mix'", "def test_dataloader(self, batch_size: Optional[int] = None) -> DataLoader:\n if self.test is not None:\n dataset = TabularDataset(\n task=self.config.task,\n data=self.test,\n categorical_cols=self.config.categorical_cols,\n continuous_cols=self.config.continuous_cols,\n embed_categorical=(not self.do_leave_one_out_encoder()),\n target=self.target,\n )\n return DataLoader(\n dataset,\n batch_size if batch_size is not None else self.batch_size,\n shuffle=False,\n num_workers=self.config.num_workers,\n pin_memory=self.config.pin_memory,\n )", "def datatype(self):\n # datatype is type of first dataarg\n return self[self.dataargs()[0]].typename", "def datasource_type(self) -> Optional[str]:\n return pulumi.get(self, \"datasource_type\")", "def datasource_type(self) -> Optional[str]:\n return pulumi.get(self, \"datasource_type\")", "def as_dataset(self) -> \"Dataset\":\n \n freq_def = {\n 1: \"L1\", # G\n 2: \"L2\", # G\n 5: \"L5\", # G\n 20: \"L2C\", # G\n 101: \"L1\", # R\n 102: \"L2\", # R\n 201: \"E1\", # E \n 205: \"E5a\", # E\n 206: \"E6\", # E\n 207: \"E5b\", # E\n 208: \"E5\", # E\n 302: \"B1_2\", # C\n 306: \"B3\", # C\n 307: \"B2b\", # C\n }\n\n float_fields = {\n \"amplitude\": None,\n \"azimuth\": \"radian\",\n \"peak2noise\": None, \n \"reflection_height\": \"meter\", \n }\n\n # Initialize dataset\n dset = dataset.Dataset()\n if not self.data:\n log.warn(\"No data in {self.file_path}.\")\n return dset\n dset.num_obs = len(self.data[\"time\"])\n\n # Add text fields\n satellite = list()\n system = list()\n for sat in self.data[\"satellite\"]:\n if sat >= 1 and sat < 100: # GPS satellites\n system.append(\"G\")\n satellite.append(\"G\" + str(int(sat)).zfill(2))\n elif sat >= 101 and sat < 200: # GLONASS satellites\n system.append(\"R\")\n satellite.append(\"R\" + str(int(sat))[1:3])\n elif sat >= 201 and sat < 300: # Galileo satellites\n system.append(\"E\")\n satellite.append(\"E\" + str(int(sat))[1:3])\n elif sat >= 301 and sat < 400: # BeiDou satellites\n system.append(\"C\")\n satellite.append(\"C\" + str(int(sat))[1:3])\n else:\n log.fatal(\"GNSSREFL satellite number {sat} is not defined. 
Valid satellite numbers are between [1-399].\")\n\n dset.add_text(\n name=\"system\", \n val=system, \n write_level=\"operational\",\n )\n\n dset.add_text(\n name=\"satellite\", \n val=satellite, \n write_level=\"operational\",\n )\n\n dset.add_text(\n name=\"frequency\", \n val=[freq_def[v] for v in self.data[\"frequency\"]], \n write_level=\"operational\",\n ) \n \n # Add time field\n dset.add_time(\n name=\"time\", \n val=self.data[\"time\"], \n scale=\"utc\", \n fmt=\"datetime\", \n write_level=\"operational\",\n )\n \n # Add float fields\n for field in float_fields.keys():\n if field not in self.data.keys():\n log.warn(f\"Field '{field}' does not exist in file {self.meta['__data_path__']}.\")\n continue\n \n value = np.deg2rad(self.data[field]) if field == \"azimuth\" else self.data[field]\n unit = \"\" if float_fields[field] is None else float_fields[field]\n \n dset.add_float(name=field, val=value, unit=unit, write_level=\"operational\")\n \n return dset", "def get_dataset_sampler(self):\n return None", "def get_dataloader(sets, root_dir, manifest_path, task, batch_size=1, return_pid = False):\n data_loaders = {}\n\n for set in ['train', 'valid', 'test', 'all_images']: # test doesn't apply to MRNet but will keep in\n if set in sets:\n if set == 'train':\n ds = Dataset(set='train', task = task, root_dir=root_dir, manifest_path = manifest_path, return_pid = return_pid,\n transform=transforms.Compose([transforms.ToPILImage(),\n #transforms.RandomHorizontalFlip(), # default is 50%\n #transforms.RandomAffine(25, # rotation\n # translate=(0.1, 0.1),\n # shear = (-15, 15)),\n transforms.ToTensor(),\n ]))\n loader = DataLoader(ds, batch_size=batch_size, shuffle=True)\n elif set == 'valid':\n ds = Dataset(set='valid', task = task, root_dir=root_dir,manifest_path = manifest_path, return_pid = return_pid,\n transform=transforms.Compose([transforms.ToPILImage(),\n transforms.ToTensor(),\n ]))\n\n loader = DataLoader(ds, batch_size=batch_size, shuffle=False)\n elif set == 'all_images':\n ds = Dataset(set='all_images', task = task, root_dir=root_dir,manifest_path = manifest_path, return_pid = return_pid,\n transform=transforms.Compose([transforms.ToPILImage(),\n transforms.ToTensor(),\n ]))\n loader = DataLoader(ds, batch_size=batch_size, shuffle=False)\n data_loaders[set] = loader\n return (data_loaders)", "def get_dataloaders(datasets, split, args, is_eval=False):\n dataloaders = []\n for task, dataset in datasets.items():\n if is_eval:\n num_rows = dataset.num_rows if args.eval_rows == -1 else args.eval_rows\n else:\n num_rows = dataset.num_rows if args.train_rows == -1 else args.train_rows\n all_input_ids = np.zeros([num_rows, args.max_length])\n all_attention_mask = np.zeros([num_rows, args.max_length])\n all_token_type_ids = np.zeros([num_rows, args.max_length])\n for i in range(num_rows):\n features = dataset[i]\n curr_len = len(features[\"attention_mask\"])\n all_input_ids[i,:curr_len] = features[\"input_ids\"]\n all_attention_mask[i,:curr_len] = features[\"attention_mask\"]\n all_token_type_ids[i,:curr_len] = features[\"token_type_ids\"]\n all_input_ids = torch.tensor(all_input_ids, dtype=torch.long)\n all_attention_mask = torch.tensor(all_attention_mask, dtype=torch.long)\n all_token_type_ids = torch.tensor(all_token_type_ids, dtype=torch.long)\n all_label = torch.tensor(dataset[:num_rows][\"label\"], dtype=torch.long)\n if task == \"stsb\":\n all_label = all_label.float()\n \n data = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label)\n if split in 
[\"train\", \"support\"]:\n sampler = RandomSampler(data)\n dataloader = DataLoader(data, sampler=sampler, batch_size=args.train_batch_size)\n else:\n sampler = SequentialSampler(data)\n dataloader = DataLoader(data, sampler=sampler, batch_size=args.eval_batch_size)\n dataloaders.append(dataloader)\n return dataloaders", "def create_dataset(dataset_type, soruce, opts): \n\n p = PreProcessor(dataset_type, opts)\n\n # If we are NOT running \"implementation.py\", we read the data from file\n if dataset_type == \"train\" or dataset_type == \"dev\" or dataset_type == \"test\":\n path_to_data = soruce\n p.read_labelled_data(path_to_data) \n # Otherwise, we read the sentence that \"implementation.py\" gave us\n elif dataset_type == \"submit\":\n submission_sentence = soruce\n p.read_test_data(submission_sentence)\n\n # Encode all the data to a list of torchTensors\n encoded_tokens, encoded_pred, encoded_tokens_pos, encoded_labels = p.encode_all_data()\n # Create SRL dataset\n dataset = SRLDataset(x=encoded_tokens, pr=encoded_pred, p=encoded_tokens_pos, y=encoded_labels)\n print(\"{} dataset size is {}\".format(dataset_type, len(dataset)))\n\n if dataset_type == \"train\" or dataset_type == \"dev\" or dataset_type == \"test\":\n return dataset\n elif dataset_type == \"submit\":\n return dataset, p.list_l_original_predicates", "def get_data_loaders(args, tokenizer):\n personachat = get_dataset(tokenizer, args.dataset_path, args.dataset_cache, args.train_lang)\n _ = personachat.pop(\"test\", None)\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": [], \"valid\": []}\n\n if args.train_lang in [\"En\", \"Fr\", \"It\", \"Id\", \"Jp\", \"Ko\", \"Zh\"]: #monolingual data\n for dataset_name, dataset in personachat.items():\n for dial in dataset[args.train_lang]: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lm_labels = True)\n datasets[dataset_name].append(instance) \n else: #multilingual data\n for dataset_name, dataset in personachat.items():\n for lang, dials in dataset.items():\n for dial in dials: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lang_id=\"<{}>\".format(lang.lower()), lm_labels = True)\n datasets[dataset_name].append(instance) #all langs together\n\n\n logger.info(\"Build train and validation dataloaders\")\n train_dataset = DatasetTrain(datasets[\"train\"])\n valid_dataset = DatasetTrain(datasets[\"valid\"])\n\n #logger.info(\"Build train and validation dataloaders\")\n #train_dataset, valid_dataset = TensorDataset(*tensor_datasets[\"train\"]), TensorDataset(*tensor_datasets[\"valid\"])\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None\n valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if args.distributed else None\n train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, shuffle=(not args.distributed), collate_fn=collate_fn)\n valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=args.valid_batch_size, shuffle=False, collate_fn=collate_fn)\n\n # logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[0].shape))\n # #logger.info(\"Train dataset (Batch, Candidates, Seq length): 
{}\".format(train_dataset.tensors[1].shape))\n # logger.info(\"Valid dataset (Batch, Candidates, Seq length): {}\".format(valid_dataset.tensors[0].shape))\n logger.info(\"Train dataset length: {}\".format(len(train_dataset)))\n logger.info(\"Valid dataset length: {}\".format(len(valid_dataset)))\n return train_loader, valid_loader, train_sampler, valid_sampler", "def get_loader(df=data):\n return dcc.Loading(\n className='loader',\n id='loading',\n type='default',\n children=[\n dcc.Markdown(id='data_summary_filtered', children=f'{len(df):,d} taxi trips selected'),\n html.Progress(id='selected_progress', max=f'{initial_length}', value=f'{len(df)}'),\n ]\n )", "def fetch_dataloader(types, params, CViters):\n\tdataloaders = {}\n\tassert CViters[0] != CViters[1], 'ERROR! Test set and validation set cannot be the same!'\n\t\n\tif len(types)>0:\n\t\tfor split in types:\n\t\t\tif split in ['train', 'val', 'test']:\n\t\t\t\tdl = DataLoader(imageDataset(split, params, CViters), batch_size=params.batch_size, shuffle=True,\n\t\t\t\t\tnum_workers=params.num_workers,\n\t\t\t\t\tpin_memory=params.cuda)\n\n\t\t\t\tdataloaders[split] = dl\n\telse:\n\t\tdl = DataLoader(imageDataset('', params, CViters), batch_size=params.batch_size, shuffle=True,\n\t\t\tnum_workers=params.num_workers,\n\t\t\tpin_memory=params.cuda)\n\n\t\treturn dl\n\n\treturn dataloaders", "def create_dataloaders(data_dir):\n\n trng_dataset = datasets.ImageFolder(data_dir / TRNG_FOLDER,\n transform=flowernet.trng_transform)\n trng_dataloader = torch.utils.data.DataLoader(trng_dataset,\n batch_size=64,\n shuffle=True)\n\n valn_dataset = datasets.ImageFolder(data_dir / VALN_FOLDER,\n transform=flowernet.pred_transform)\n valn_dataloader = torch.utils.data.DataLoader(valn_dataset,\n batch_size=64,\n shuffle=True)\n\n return trng_dataloader, valn_dataloader", "def data_type(self) -> pulumi.Input['AssetModelDataType']:\n return pulumi.get(self, \"data_type\")", "def fetch_dataloader(types, data_dir, hyper_params, train_idx=None, val_idx=None):\n dataloaders = {}\n \n # TODO: write this to hyper_params, make hyper_params an out variable? then save? 
yes, AND: when ONLY test is requested, load from hyperparams!\n # TODO: also, add 3rd variation of types: for testing, only read it from hyper_params (DO I NEED TO READ HYPER_PARAMS FOR JUST TESTING?)\n if train_idx is not None:\n mean, std = mean_std_calc(DataLoader(Subset(Heart2DSegmentationDataset(str(Path(data_dir) / \"train_heart_scans\"), hyper_params.endo_or_epi), train_idx)))\n hyper_params.mean = mean.item()\n hyper_params.std = std.item()\n else:\n if 'train' in types:\n mean, std = mean_std_calc(DataLoader(Heart2DSegmentationDataset(str(Path(data_dir) / \"train_heart_scans\"), hyper_params.endo_or_epi)))\n hyper_params.mean = mean.item()\n hyper_params.std = std.item()\n else:\n mean, std = torch.tensor(hyper_params.mean), torch.tensor(hyper_params.std)\n \n print(\"Mean: {}, Std: {}\".format(mean.item(), std.item()))\n # borrowed from http://pytorch.org/tutorials/advanced/neural_style_tutorial.html\n # and http://pytorch.org/tutorials/beginner/data_loading_tutorial.html\n train_transformer = transforms.Compose([\n transforms.Normalize(mean=[mean.item()], std=[std.item()])\n ])\n \n eval_transformer = transforms.Compose([\n transforms.Normalize(mean=[mean.item()], std=[std.item()])\n ])\n\n for split in ['train', 'val', 'test']:\n if split in types:\n path = str(Path(data_dir) / \"{}_heart_scans\".format(split if split != 'val' else 'train'))\n\n if split == 'train':\n if train_idx is not None:\n dl = DataLoader(Subset(Heart2DSegmentationDataset(path, hyper_params.endo_or_epi, train_transformer), train_idx), \n batch_size=hyper_params.batch_size, \n shuffle=True,\n num_workers=hyper_params.num_workers,\n pin_memory=hyper_params.cuda)\n else:\n dl = DataLoader(Heart2DSegmentationDataset(path, hyper_params.endo_or_epi, train_transformer), \n batch_size=hyper_params.batch_size, \n shuffle=True,\n num_workers=hyper_params.num_workers,\n pin_memory=hyper_params.cuda)\n else:\n if (split == 'val') and (val_idx is not None): \n dl = DataLoader(Subset(Heart2DSegmentationDataset(path, hyper_params.endo_or_epi, eval_transformer), val_idx), \n batch_size=hyper_params.batch_size, \n shuffle=False,\n num_workers=hyper_params.num_workers,\n pin_memory=hyper_params.cuda) \n else:\n dl = DataLoader(Heart2DSegmentationDataset(path, hyper_params.endo_or_epi, eval_transformer), \n batch_size=hyper_params.batch_size, \n shuffle=False,\n num_workers=hyper_params.num_workers,\n pin_memory=hyper_params.cuda)\n\n dataloaders[split] = dl\n\n return dataloaders", "def data_creator(config):\n train_dataset, val_dataset = LinearDataset(2, 5), LinearDataset(2, 5)\n train_loader = DataLoader(train_dataset, batch_size=config[\"batch_size\"])\n val_loader = DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n return train_loader, val_loader", "def get_dataset(self):\n return" ]
[ "0.64198226", "0.6303011", "0.62908787", "0.6223448", "0.6176214", "0.6175871", "0.59194773", "0.59120303", "0.5911008", "0.59027666", "0.5870242", "0.584646", "0.5842133", "0.5794101", "0.57517266", "0.574646", "0.56932807", "0.5689548", "0.5668795", "0.5666431", "0.56519496", "0.5641953", "0.55997527", "0.5598908", "0.5585775", "0.5576712", "0.55744284", "0.5558059", "0.5552589", "0.55400634", "0.55322725", "0.55246335", "0.5524457", "0.5516219", "0.5506324", "0.5488089", "0.54876935", "0.54613286", "0.54556596", "0.5453771", "0.5452946", "0.54512155", "0.5450985", "0.5448641", "0.54481095", "0.5434148", "0.5433184", "0.54303765", "0.5425529", "0.54217196", "0.5419809", "0.54123855", "0.5405322", "0.5394609", "0.5391198", "0.5365645", "0.53621894", "0.5352216", "0.5348565", "0.5345256", "0.5342907", "0.5331544", "0.531984", "0.53164536", "0.5303098", "0.5287537", "0.5287537", "0.5287537", "0.5287537", "0.5287537", "0.52728623", "0.5248868", "0.5245537", "0.5245256", "0.5239651", "0.5239651", "0.5239651", "0.5239651", "0.52351254", "0.52320826", "0.5226803", "0.5223386", "0.5218782", "0.52121973", "0.5209726", "0.520924", "0.520924", "0.5198293", "0.5195303", "0.51901877", "0.518955", "0.5187228", "0.5182735", "0.51797163", "0.51654303", "0.51634556", "0.5162887", "0.51596963", "0.51584494", "0.51582325" ]
0.67818266
0
Name of this task
def name(self): return self._name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def task_name(self):\n pass", "def task_name(self) -> str:\n return self._task_name", "def getTaskName(self):\n return self._taskName", "def task(self) -> str:\n return self._task", "def task(self, name):\n pass", "def task_label(self) -> str:\n label = str(self.request.id) if self.request.id else self.name\n label += '_%d' % self.request.retries if self.request.retries >= 1 else ''\n return label", "def __str__(self):\n return self.task", "def TaskDisplayName(cls, task):\n if not task: return None\n return '//' + cls.TaskRelativeName(task)", "def get_target(self):\n task = self.task.get_task(self.task_id)\n if 'name' in task:\n return str(task['name'])\n return str(task)", "def name(self):\n return self._job", "def name(self) -> str:\n pass", "def name(self) -> str:\n pass", "def name(self) -> str:\n pass", "def name(self) -> str:\n pass", "def generateTaskName(self):\n brokenComponent = ['head','hand','leg','body','hand','leg']\n for component in brokenComponent:\n self.enqueue(Task(component))", "def get_target_simple(self):\n task = self.task.get_task(self.task_id)\n return str(task['name'])", "def name(self):\n pass", "def gen_task_name(app, name, module_name):\n ...", "def name(self) -> str:\n ...", "def name(self) -> str:\n ...", "def name(self) -> str:\n return self.inst['targetname']", "def name(self):\n try:\n return self._name\n except AttributeError:\n if self.is_task:\n try:\n return self.pos_str\n except:\n return os.path.basename(self.workdir)\n else:\n return os.path.basename(self.workdir)", "def get_name(self) -> str:\n pass", "def name() -> str:\n pass", "def name(self):\r\n pass", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def get_name(self):\n pass", "def get_name(self):\n pass", "def getTaskTitle(self) -> unicode:\n ...", "def getTaskTitle(self) -> unicode:\n ...", "def gettaskname(self): # 3\n sizetaskname_ = (1 + self.gettasknamelen())\n arr_taskname = array.array(\"b\",[0]*((sizetaskname_)))\n memview_arr_taskname = memoryview(arr_taskname)\n res,resargs = self.__obj.gettaskname(sizetaskname_,memview_arr_taskname)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_taskname = resargs\n retarg_taskname = arr_taskname.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_taskname", "def name(self) -> str:\n raise NotImplementedError", "def name(self):\n return self.__name__", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def TaskBaseName(cls, task):\n if not task: return None\n return os.path.basename(task)", "def name(self) -> str:\n raise NotImplementedError()", "def name(self):\n return self.NAME", "def get_name(self):\n return", "def get_task_id(self):\n if self.task_id:\n return self.task_id\n return (f'{self.task_type}_{self.get_source_system().lower()}'\n f'.{self.get_source_subsystem().lower()}.{self.get_name().upper()}')", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise 
NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self) -> str:\n return self.__name", "def name(self) -> str:\n return self.__name", "def name(self) -> str:\n return self.__name", "def name(self) -> str:\n return self.__name", "def name(self) -> str:\n return self.__name", "def name(self) -> str:\n return self.__name", "def name(self) -> str:\n return self.__name", "def get_name(self):\n return \"{0}: \".format(self.__class__.__name__)", "def name(self):\n return None", "def get_name() -> str:\n pass", "def get_name(self) -> str:\n raise NotImplementedError", "def get_name(self):\n\t\treturn self.__name", "def name(self) -> str:\n\t\traise NotImplementedError", "def name(self):\n raise NotImplementedError # pragma: no cover", "def task_file(self) -> str:\n return self._task_file", "def name ( self ) :\n return self.__name if self.__name else ''", "def name(self) -> str: # pragma: no cover", "def name(self):\n\t\treturn self.name_", "def name(self):\n raise NotImplementedError()", "def name(self):\n raise NotImplementedError()", "def get_name(self):\n\n\t\treturn self.__name", "def get_name(self):\n return None", "def get_name(self):\n raise NotImplementedError", "def get_name(self):\n raise NotImplementedError", "def get_name(self):\n raise NotImplementedError", "def get_name(self):\n raise NotImplementedError", "def state_name(self):\n return TASK_STATE.get(self.state, 'UNKNOWN')", "def name(self):\n\t\treturn self._name", "def name(self):\n\t\treturn self._name", "def tname(self) -> str:", "def name(self):\n return self.config.get('name') or f\"{self.id.replace('_', ' ').title()}\"", "def name(self):\n return self.config.get('name') or f\"{self.id.replace('_', ' ').title()}\"", "def name(self):\n # type: () -> str\n return self._name" ]
[ "0.94787014", "0.8952571", "0.83853745", "0.80476105", "0.7959026", "0.7788897", "0.7784508", "0.75775784", "0.74878675", "0.748582", "0.7418969", "0.7418969", "0.7418969", "0.7418969", "0.74116933", "0.7375836", "0.73410845", "0.73359287", "0.7334646", "0.7334646", "0.7306739", "0.7287391", "0.7247417", "0.7221274", "0.7206384", "0.71984166", "0.71984166", "0.71984166", "0.71984166", "0.71984166", "0.7192279", "0.7192279", "0.7160165", "0.7160165", "0.71582997", "0.7153592", "0.7133368", "0.71312755", "0.71312755", "0.71312755", "0.71312755", "0.71312755", "0.71312755", "0.71256423", "0.71256423", "0.71256423", "0.71256423", "0.71256423", "0.71256423", "0.71256423", "0.71256423", "0.71256423", "0.71256423", "0.712382", "0.7113963", "0.7113734", "0.7111165", "0.7108342", "0.71068335", "0.71068335", "0.71068335", "0.71068335", "0.71068335", "0.71068335", "0.71068335", "0.71068335", "0.71068335", "0.71068335", "0.70999616", "0.70999616", "0.70999616", "0.70999616", "0.70999616", "0.70999616", "0.70999616", "0.70983016", "0.70954347", "0.70813644", "0.70695704", "0.7069106", "0.706444", "0.70615673", "0.7058208", "0.7057248", "0.70487386", "0.70472854", "0.7041053", "0.7041053", "0.7034973", "0.70309573", "0.70291036", "0.70291036", "0.70291036", "0.70291036", "0.70268077", "0.7019312", "0.7019312", "0.7009648", "0.70064384", "0.70064384", "0.7000012" ]
0.0
-1
Concatenate two task's datasets
def concatenate_tasks( tasks, concat_train=True, concat_valid=True, concat_test=True, ): new_task = deepcopy(tasks[0]) new_task._name = "+".join(task.name for task in tasks) if concat_train: new_task._train_data = ConcatDataset( [task.train_data for task in tasks]) if concat_valid: new_task._valid_data = ConcatDataset( [task.valid_data for task in tasks]) if concat_test: new_task._test_data = ConcatDataset([task.test_data for task in tasks])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def concatenate_data():", "def concat(self: TAvalancheDataset, other: TAvalancheDataset) -> TAvalancheDataset:\n return self.__class__([self, other])", "def Concat(datasets):\n\n dataset_num = len(datasets)\n dataset = datasets[0]\n for i in range(1, dataset_num):\n dataset.concatenate(datasets[i])\n return dataset", "def merge(self, other):\r\n self._train_datas = np.concatenate(\r\n [self._train_datas, other._train_datas], 0)\r\n self._train_labels = np.concatenate(\r\n [self._train_labels, other._train_labels], 0)", "def concat(cls, pipe1, pipe2):\n # pylint: disable=protected-access\n if pipe1.dataset != pipe2.dataset and pipe1.dataset is not None and pipe2.dataset is not None:\n raise ValueError(\"Cannot add pipelines with different datasets\")\n\n new_p1 = cls.from_pipeline(pipe1)\n new_p2 = cls.from_pipeline(pipe2)\n new_p1._action_list += new_p2._action_list[:]\n new_p1._variables = {**pipe1._variables, **pipe2._variables}\n new_p1.dataset = pipe1.dataset or pipe2.dataset\n return new_p1", "def test_merge_datasets(self):\n disk.merge_datasets(self.input_datasets[0:2], self.output_dataset)\n self.assertEqual(4, len(self.output_dataset.metadata()))", "def __add__(self, other):\n train = copy.deepcopy(self.train)\n\n for img_path, pid, camid, dsetid in other.train:\n pid += self.num_train_pids\n camid += self.num_train_cams\n dsetid += self.num_datasets\n train.append((img_path, pid, camid, dsetid))\n\n ###################################\n # Note that\n # 1. set verbose=False to avoid unnecessary print\n # 2. set combineall=False because combineall would have been applied\n # if it was True for a specific dataset; setting it to True will\n # create new IDs that should have already been included\n ###################################\n if isinstance(train[0][0], str):\n return ImageDataset(\n train,\n self.query,\n self.gallery,\n transform=self.transform,\n mode=self.mode,\n combineall=False,\n verbose=False\n )\n else:\n return VideoDataset(\n train,\n self.query,\n self.gallery,\n transform=self.transform,\n mode=self.mode,\n combineall=False,\n verbose=False,\n seq_len=self.seq_len,\n sample_method=self.sample_method\n )", "def ConcatDF(train_set, test_set):\n return pd.concat([train_set, test_set], sort=True).reset_index(drop=True)", "def merge(self, other):\n from .dataset import Dataset\n\n if other is None:\n return self.to_dataset()\n else:\n other_vars = getattr(other, 'variables', other)\n coords = merge_coords_without_align([self.variables, other_vars])\n return Dataset._from_vars_and_coord_names(coords, set(coords))", "def merge(datasets: Sequence[\"Dataset\"]) -> \"Dataset\":\n ds = datasets[0].copy()\n for dsj in datasets[1:]:\n ds = ds._append_items(dsj, copy=False)\n\n return ds", "def Zip(datasets):\n return tf.data.Dataset.zip(datasets)", "def merge(self, dataset):\n def merge_data(source, dest):\n for key, value in source.items():\n if isinstance(value, dict):\n merge_data(value, dest.setdefault(key, {}))\n else:\n dest[key] = value\n return dest\n\n merge_data(dataset.data, self._data)\n\n for h in dataset.task_history:\n if h not in self._task_history:\n self._task_history.append(h)", "def combine_stack_and_label(filesource_dataset_1,filesource_dataset_2,num_sample):\n\n x = filesource_dataset_1[0]\n x_utterances = len(filesource_dataset_1)\n for idx in tqdm(range(1, x_utterances)):\n x = np.hstack((x, filesource_dataset_1[idx]))\n #print(x.shape)\n y = filesource_dataset_2[0]\n y_utterances = len(filesource_dataset_2)\n for idx in tqdm(range(1, 
y_utterances)):\n y = np.hstack((y, filesource_dataset_2[idx]))\n X = np.hstack((x,y))\n Y = np.hstack((np.ones((x.shape[1])),np.zeros((y.shape[1]))))\n\n if (X.shape[1] > num_sample):\n idx = np.random.choice(X.shape[1], num_sample)\n X = X[:, idx]\n Y = Y[idx]\n return X, Y", "def merge_datasets(self, other):\r\n if isinstance(other, SpatialDataFrame) and \\\r\n other.geometry_type == self.geometry_type:\r\n return pd.concat(objs=[self, other], axis=0)\r\n elif isinstance(other, DataFrame):\r\n return pd.concat(objs=[self, other], axis=0)\r\n elif isinstance(other, Series):\r\n self['merged_datasets'] = other\r\n elif isinstance(other, SpatialDataFrame) and \\\r\n other.geometry_type != self.geometry_type:\r\n raise ValueError(\"Spatial DataFrames must have the same geometry type.\")\r\n else:\r\n raise ValueError(\"Merge datasets cannot merge types %s\" % type(other))", "def flatten(self, in_place=True):\n new_dataset = TaskData()\n\n for i, dataset in enumerate(self._datasets):\n if i != self._default_index:\n new_dataset.merge(dataset)\n\n new_dataset.merge(self.default_dataset)\n\n # point all aliases to the new, single dataset\n new_aliases = {alias: 0 for alias, _ in self._aliases.items()}\n\n # replace existing datasets or return a new MultiTaskData object\n if in_place:\n self._datasets = [new_dataset]\n self._aliases = new_aliases\n self._default_index = 0\n else:\n return MultiTaskData(dataset=new_dataset, aliases=list(new_aliases.keys()))", "def concat_two_batches(batch1, batch2):\r\n with tf.name_scope('concat_batch'):\r\n if 'y' in batch1 and isinstance(batch1['y'], tf.Tensor):\r\n return {'x': tf.concat([batch1['x'], batch2['x']], axis=0),\r\n 'y': tf.concat([batch1['y'], batch2['y']], axis=0)}\r\n else:\r\n return {'x': tf.concat([batch1['x'], batch2['x']], axis=0)}", "def main():\n file_one_path, file_two_path, output_path =\\\n get_command_line_arguments(\n ['/home/ehler002/project/groups/go/Data/Cluster_Data/Dataset.txt',\n '/home/ehler002/project/groups/go/Data/Cluster_Data/translated_genes.fpkm_table',\n '/home/ehler002/project/groups/go/Data/Cluster_Data/Full_fpkm_Table.txt'])\n pattern = 'CRO_T'\n for file_path in [file_one_path, file_two_path]:\n assert os.path.exists(file_path), 'File %s does not exist.' 
% file_path\n start_time = datetime.datetime.now()\n print('Started concatenation at %s' % start_time)\n file_contents, headers = get_file_contents(file_two_path)\n file_contents = sort_file_contents(file_contents)\n file_contents = remove_pattern(file_contents, pattern)\n concatenate_files(file_one_path, file_contents, headers, output_path)\n print('Finished concatenation in %s' % (datetime.datetime.now() - start_time))", "def load_data(self, task):\n params = self.params\n data = {splt: {} for splt in ['train', 'valid', 'test']}\n dpath = os.path.join(params.data_path, 'eval', task)\n\n self.n_sent = 1 if task in ['SST-2', 'CoLA'] else 2\n\n for splt in ['train', 'valid', 'test']:\n\n # load data and dictionary\n data1 = load_binarized(os.path.join(dpath, '%s.s1.pth' % splt), params)\n data2 = load_binarized(os.path.join(dpath, '%s.s2.pth' % splt), params) if self.n_sent == 2 else None\n data['dico'] = data.get('dico', data1['dico'])\n\n # set dictionary parameters\n set_dico_parameters(params, data, data1['dico'])\n if self.n_sent == 2:\n set_dico_parameters(params, data, data2['dico'])\n\n # create dataset\n if self.n_sent == 1:\n data[splt]['x'] = Dataset(data1['sentences'], data1['positions'], params)\n else:\n data[splt]['x'] = ParallelDataset(\n data1['sentences'], data1['positions'],\n data2['sentences'], data2['positions'],\n params\n )\n\n # load labels\n if splt != 'test' or task in ['MRPC']:\n # read labels from file\n with open(os.path.join(dpath, '%s.label' % splt), 'r') as f:\n lines = [l.rstrip() for l in f]\n # STS-B task\n if task == 'STS-B':\n assert all(0 <= float(x) <= 5 for x in lines)\n y = [float(l) for l in lines]\n # QQP\n elif task == 'QQP':\n UNK_LABEL = 0\n lab2id = {x: i for i, x in enumerate(sorted(set(lines) - set([''])))}\n y = [lab2id.get(x, UNK_LABEL) for x in lines]\n # other tasks\n else:\n lab2id = {x: i for i, x in enumerate(sorted(set(lines)))}\n y = [lab2id[x] for x in lines]\n data[splt]['y'] = torch.LongTensor(y)\n assert len(data[splt]['x']) == len(data[splt]['y'])\n\n # compute weights for weighted training\n if task != 'STS-B' and params.weighted_training:\n weights = torch.FloatTensor([\n 1.0 / (data['train']['y'] == i).sum().item()\n for i in range(len(lab2id))\n ]).npu()\n self.weights = weights / weights.sum()\n else:\n self.weights = None\n\n return data", "def joinDev(training, tLabels, dev, dLabels):\n\tdata = [n.concatenate([t,d]) for t,d in zip(training, dev)]\n\n\treturn data, n.concatenate([tLabels, dLabels])", "def concat(datasets: Sequence[\"Dataset\"], keep=\"last\") -> \"Dataset\":\n\n if keep != \"last\":\n raise NotImplementedError(\n \"Last values is the only available option at the moment.\"\n )\n ds = datasets[0].copy()\n for dsj in datasets[1:]:\n ds = ds._concat_time(dsj, copy=False)\n\n return ds", "def merge_models(model_1, model_2, task=None):\n\n def _merge_models(model_1, model_2):\n\n result_model = copy.deepcopy(model_1)\n\n if isinstance(model_1, torch.nn.Embedding):\n\n result_model = _add_embedding_layer(model_1, model_2)\n\n elif isinstance(model_1, torch.nn.Linear):\n result_model = _add_linear_layer(model_1, model_2)\n\n elif isinstance(model_1, torch.nn.LayerNorm):\n result_model = _add_double_norm_layer(model_1, model_2)\n\n elif isinstance(model_1, BertSelfAttention):\n result_model = _add_bert_self_attention_layer(model_1, model_2)\n\n for name_1, name_2 in zip(model_1._modules, model_2._modules):\n module_1 = model_1._modules[name_1]\n module_2 = model_2._modules[name_2]\n\n result_model._modules[name_1] 
= _merge_models(module_1, module_2)\n\n return result_model\n\n result_model = _merge_models(model_1, model_2)\n\n result_model._text_field_embedder._token_embedders[\"tokens\"].output_dim = 1024\n\n if task == \"QA\":\n result_model._linear_layer = _add_final_linear_layer(\n model_1._linear_layer, model_2._linear_layer\n )\n else:\n result_model._classification_layer = _add_final_linear_layer(\n model_1._classification_layer, model_2._classification_layer\n )\n\n return result_model", "def combine(self, states, tasks):\n self._assert_is_batched(states, tasks)\n return self._tf_call(self._combine, states, tasks)", "def merge(self, other):\n if other.n_points != self.n_points:\n raise ValueError(\n 'Deduplicator size mismatch: '\n f'{self.n_points} != {other.n_points}'\n )\n self.data_reduced.extend(other.data_reduced)\n self.data_kd.extend(other.data_kd)", "def __concatenateB0(self, source1, source2, target):\n cmd = \"mrcat {} {} {} -axis 3 -nthreads {} -quiet\".format(source1, source2, target, self.getNTreadsMrtrix())\n self.launchCommand(cmd)\n return target", "def concat(a, b):\n return torch.cat((a, b), 1)", "def combine_datasources(dset, dset_extra, valid_size=0, shuffle=True, random_seed=2019,\n maxsize=None, device='cpu'):\n if shuffle == True and random_seed:\n np.random.seed(random_seed)\n\n ## Convert both to TensorDataset\n if isinstance(dset, torch.utils.data.DataLoader):\n dataloader_args = {k:getattr(dset, k) for k in ['batch_size', 'num_workers']}\n X, Y = load_full_dataset(dset, targets=True, device=device)\n d = int(np.sqrt(X.shape[1]))\n X = X.reshape(-1, 1, d, d)\n dset = torch.utils.data.TensorDataset(X, Y)\n logger.info(f'Main data size. X: {X.shape}, Y: {Y.shape}')\n elif isinstance(dset, torch.utils.data.Dataset):\n raise NotImplemented('Error: combine_datasources cant take Datasets yet.')\n\n merged_dset = torch.utils.data.ConcatDataset([dset, dset_extra])\n train_idx, valid_idx = random_index_split(len(dset), 1-valid_size, (maxsize, None)) # No maxsize for validation\n train_idx = np.concatenate([train_idx, np.arange(len(dset_extra)) + len(dset)])\n\n if shuffle:\n train_sampler = SubsetRandomSampler(train_idx)\n valid_sampler = SubsetRandomSampler(valid_idx)\n else:\n train_sampler = SubsetSampler(train_idx)\n valid_sampler = SubsetSampler(valid_idx)\n\n train_loader_ext = dataloader.DataLoader(merged_dset, sampler = train_sampler, **dataloader_args)\n valid_loader_ext = dataloader.DataLoader(merged_dset, sampler = valid_sampler, **dataloader_args)\n\n logger.info(f'Fold Sizes: {len(train_idx)}/{len(valid_idx)} (train/valid)')\n\n return train_loader_ext, valid_loader_ext", "def resplit_datasets(dataset, other_dataset, random_seed=None, split=None):\n # Prevent circular dependency\n from torchnlp.datasets import Dataset\n\n concat = dataset.rows + other_dataset.rows\n shuffle(concat, random_seed=random_seed)\n if split is None:\n return Dataset(concat[:len(dataset)]), Dataset(concat[len(dataset):])\n else:\n split = max(min(round(len(concat) * split), len(concat)), 0)\n return Dataset(concat[:split]), Dataset(concat[split:])", "def combine_stats(self, self2):\n if self.covs_ds[\"variable\"] != self2.covs_ds[\"variable\"]:\n raise ValueError(\"Variable names in the two datasets are not the same\")\n\n self.covs_ds[\"num_times\"] += self2.covs_ds[\"num_times\"]\n self.covs_ds[\"sum\"] += self2.covs_ds[\"sum\"]\n self.covs_ds[\"sumsq\"] += self2.covs_ds[\"sumsq\"]\n if 'dstn' in self.covs_ds.dims:\n if self.covs_ds.dims['dstn'] != self2.covs_ds.dims['dstn']:\n raise 
ValueError(\"Number of distances in the two datasets are not the same\")\n self.covs_ds[self.nam_sumsq_var] += self2.covs_ds[self.nam_sumsq_var]", "def add_dataset(self, task_name, dataset=None, *, aliases=None):\n self._datasets.append(dataset if dataset is not None else TaskData())\n last_index = len(self._datasets) - 1\n self._aliases[task_name] = last_index\n\n if aliases is not None:\n for alias in aliases:\n self._aliases[alias] = last_index\n\n if len(self._datasets) == 1:\n self._default_index = 0", "def join(upstream, product):\n a = pd.read_parquet(str(upstream[\"get\"]))\n b = pd.read_parquet(str(upstream[\"features\"]))\n df = a.join(b)\n df.to_parquet(str(product))", "def concatenate(cls, datasets, datatype=None, new_type=None):\n from . import Dataset, default_datatype\n new_type = new_type or Dataset\n if isinstance(datasets, NdMapping):\n dimensions = datasets.kdims\n keys, datasets = zip(*datasets.data.items())\n elif isinstance(datasets, list) and all(not isinstance(v, tuple) for v in datasets):\n # Allow concatenating list of datasets (by declaring no dimensions and keys)\n dimensions, keys = [], [()]*len(datasets)\n else:\n raise DataError('Concatenation only supported for NdMappings '\n 'and lists of Datasets, found %s.' % type(datasets).__name__)\n\n template = datasets[0]\n datatype = datatype or template.interface.datatype\n\n # Handle non-general datatypes by casting to general type\n if datatype == 'array':\n datatype = default_datatype\n elif datatype == 'image':\n datatype = 'grid'\n\n if len(datasets) > 1 and not dimensions and cls.interfaces[datatype].gridded:\n raise DataError('Datasets with %s datatype cannot be concatenated '\n 'without defining the dimensions to concatenate along. '\n 'Ensure you pass in a NdMapping (e.g. a HoloMap) '\n 'of Dataset types, not a list.' % datatype)\n\n datasets = template.interface.cast(datasets, datatype)\n template = datasets[0]\n data = list(zip(keys, datasets)) if keys else datasets\n concat_data = template.interface.concat(data, dimensions, vdims=template.vdims)\n return template.clone(concat_data, kdims=dimensions+template.kdims, new_type=new_type)", "def task_dataset(representations, features):\n return FakeTaskDataset(representations, features)", "def join(self, other_h5):\n\n with other_h5 as h5:\n for run_idx in h5.run_idxs:\n # the other run group handle\n other_run = h5.run(run_idx)\n # copy this run to this file in the next run_idx group\n self.h5.copy(other_run, '{}/{}'.format(RUNS, self.next_run_idx()))", "def merge_datasets(dslist):\n # We use a variant of our fast stitching routine\n # So first create a sorted list of angles and source files\n container = []\n print 'Passed %d datasets for merging ' % len(dslist)\n proc_info = \"\"\"This dataset was created by collating points from multiple datasets. 
Data reduction \n information for the individual source datasets is as follows:\"\"\"\n title_info = \"Merge:\"\n for num,dataset in enumerate(dslist):\n storage_info = zip(dataset.axes[0],dataset.storage,dataset.var.storage)\n container.extend(storage_info)\n proc_info += \"\\n\\n===Dataset %s===\\n\" % str(dataset.title)\n try:\n proc_info += dataset.harvest_metadata(\"CIF\")[\"_pd_proc_info_data_reduction\"]\n except KeyError:\n pass\n title_info = title_info + dataset.title + ':'\n # So we have a list of angle,intensity,variance triples which we sort on angle\n container = sorted(container, key=lambda(angle,intensity,variance):angle)\n angles = map(lambda (a,b,c):a,container)\n intensities = map(lambda (a,b,c):b,container)\n variances = map(lambda (a,b,c):c,container)\n rs = Dataset(intensities)\n rs.var = variances\n rs.axes[0] = angles\n rs.axes[0].title = 'Two theta (degrees)'\n rs.title = title_info\n # Add metadata\n AddCifMetadata.add_standard_metadata(rs)\n rs.add_metadata(\"_pd_proc_info_data_reduction\",proc_info,\"CIF\")\n return rs", "def concat(self, others):\n table = pd.concat([otr.data for otr in others], ignore_index=True)\n result = self.as_dataframe(table)\n result.sort()\n return result", "def merge_working_sets(self, other):\n\n for dist in other.by_key.values(): self.add(dist)\n return self", "def mergeAggregatedCsvData(self, contexts, obj, aggData1, aggData2):\n return aggData1 + aggData2", "def zarr_concat(input_zarrs: List[str], output_zarr: str, verbose: bool = False) -> None:\n\n output_dataset = ChunkedDataset(output_zarr)\n if os.path.exists(output_zarr):\n output_dataset.open(\"a\")\n else:\n output_dataset.initialize()\n\n for input_zarr in input_zarrs:\n\n input_dataset = ChunkedDataset(input_zarr)\n input_dataset.open()\n\n if verbose:\n print(f\"input scenes size: {input_dataset.scenes.shape[0]}\")\n print(f\"input frames size: {input_dataset.frames.shape[0]}\")\n print(f\"input agents size: {input_dataset.agents.shape[0]}\")\n\n frame_offset = output_dataset.frames.shape[0]\n new_scenes = np.zeros(input_dataset.scenes.shape[0], dtype=SCENE_DTYPE)\n\n for i, scene in enumerate(input_dataset.scenes): # add new scenes to zarr\n scene[\"frame_index_interval\"] = scene[\"frame_index_interval\"] + frame_offset\n new_scenes[i] = scene\n output_dataset.scenes.append(new_scenes)\n\n agent_offset = output_dataset.agents.shape[0]\n new_frames = np.zeros(input_dataset.frames.shape[0], dtype=FRAME_DTYPE)\n for i, frame in enumerate(input_dataset.frames): # add new frames to the zarr\n frame[\"agent_index_interval\"] = frame[\"agent_index_interval\"] + agent_offset\n new_frames[i] = frame\n output_dataset.frames.append(new_frames)\n\n output_dataset.agents.append(input_dataset.agents) # add new agents to the zarr\n\n if verbose:\n print(f\"output scenes size: {output_dataset.scenes.shape[0]}\")\n print(f\"output frames size: {output_dataset.frames.shape[0]}\")\n print(f\"output agents size: {output_dataset.agents.shape[0]}\")", "def task_reformat_data():\n\n for data_type in data_sets:\n yield {\n 'actions': ['python reformat_weather_data.py %(dependencies)s > %(targets)s'],\n 'file_dep': ['UK_{}_data.txt'.format(data_type)],\n 'targets': ['UK_{}_data.reformatted.txt'.format(data_type)],\n }", "def ConcatDF(train_set, test_set):\n df_all = pd.concat([train_set, test_set], sort=True).reset_index(drop=True)\n df_all.trn_len = train_set.shape[0]\n return df_all", "def combine_all(self):\n combined = copy.deepcopy(self.train)\n\n def _combine_data(data):\n for img_path, 
pid, camid in data:\n\n if pid in self._junk_pids:\n continue\n #pdb.set_trace()\n pid = self.dataset_name + \"_\" + str(pid)\n camid = self.dataset_name + \"_\" + str(camid)\n combined.append((img_path, pid, camid))\n\n _combine_data(self.query)\n _combine_data(self.gallery)\n\n self.train = combined\n self.num_train_pids = self.get_num_pids(self.train)", "def combine(cls, first: 'Output', second: 'Output') -> 'Output':\n return cls(\n first.output,\n second.target,\n second.input,\n second.params or first.params,\n first.delay + second.delay,\n times=first.times if second.times < 0\n else second.times if first.times < 0\n else min(first.times, second.times),\n inst_out=first.inst_out,\n inst_in=second.inst_in,\n comma_sep=first.comma_sep and second.comma_sep,\n )", "def __add__(self, other):\n if isiterable(other) and not isinstance(other, DAG):\n return TaskGroup([self] + list(other))\n else:\n return TaskGroup((self, other))", "def __add__(self, other):\n if not isinstance(other, RunTS):\n raise TypeError(f\"Cannot combine {type(other)} with RunTS.\")\n\n # combine into a data set use override to keep attrs from original\n combined_ds = xr.combine_by_coords(\n [self.dataset, other.dataset], combine_attrs=\"override\"\n )\n\n n_samples = (\n self.sample_rate\n * float(\n combined_ds.time.max().values - combined_ds.time.min().values\n )\n / 1e9\n ) + 1\n\n new_dt_index = make_dt_coordinates(\n combined_ds.time.min().values,\n self.sample_rate,\n n_samples,\n self.logger,\n )\n\n new_run = RunTS(\n run_metadata=self.run_metadata,\n station_metadata=self.station_metadata,\n survey_metadata=self.survey_metadata,\n )\n\n new_run.dataset = combined_ds.interp(\n time=new_dt_index, method=\"slinear\"\n )\n\n new_run.run_metadata.update_time_period()\n new_run.station_metadata.update_time_period()\n new_run.survey_metadata.update_time_period()\n new_run.filters = self.filters\n new_run.filters.update(other.filters)\n\n return new_run", "def mconcat(a, b):\r\n if a is None:\r\n return b\r\n if b is None:\r\n return a\r\n for key in b.keyset:\r\n value=get(b,key)\r\n put(a,key,value)\r\n return a", "def join(self, other, on):\n\t\t# check for correct join\n\t\tif not (on in self.headers or on in other.headers):\n\t\t\tprint \"Error: header '{0}' not found in both collections\".format(on)\n\t\t\treturn None\n\n\t\t# create new dataset\n\t\tjoined = Dataset()\n\t\t\n\t\t# fill new dataset with combined data\n\t\tmappedHeaders = joinHeaders(self, other, joined, on)\n\t\tmergeRows(self, other, joined, on, mappedHeaders)\n\t\tjoined.ensureFilled()\n\n\t\t# return newly created dataset\n\t\treturn joined", "def concat(self):\n nfa2 = self.aut_stack.pop()\n nfa1 = self.aut_stack.pop()\n\n nfa1_star = nfa1.transform('X')\n nfa2_star = nfa2.transform('Y')\n\n nfa_concat = Automaton()\n nfa_concat.final = nfa2_star.final\n nfa_concat.q_0 = nfa1_star.q_0\n nfa_concat.states = list(set(nfa1_star.states).union(nfa2_star.states))\n nfa_concat.alphabet = list(set(nfa1_star.alphabet).union(nfa2_star.alphabet))\n nfa_concat.transition = dict(nfa1_star.transition, **nfa2_star.transition)\n for a in nfa1_star.final:\n key = a + ', .'\n if nfa_concat.transition.get(key, 0) == 0:\n nfa_concat.transition[key] = [nfa2_star.q_0]\n else:\n nfa_concat.transition[key].append(nfa2_star.q_0)\n\n self.aut_stack.append(nfa_concat)", "def test_includes_two_new_datasets(self):\n new_datasets = factories.SourceDatasetFactory.create_batch(2, source_study_version=self.study_version_3)\n response = 
self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n for new_dataset in new_datasets:\n self.assertIn(new_dataset, table.data)", "def shuffle(dataset_one: np.ndarray, dataset_two: np.ndarray = None) -> np.ndarray:\n np.random.shuffle(dataset_one)\n if dataset_two is not None:\n np.random.shuffle(dataset_two)\n return dataset_one, dataset_two\n else:\n return dataset_one", "def concat(events, max_gap=3600):\r\n # [i.load_data() for i in events]\r\n\r\n result = events[0]\r\n result.load_data()\r\n\r\n for i in range(1, len(events)):\r\n events[i].load_data()\r\n result = result.join(events[i], max_gap)\r\n\r\n return result", "def set_task(task_id, train_dataset, val_dataset, test_dataset):\n assert isinstance(task_id, int)\n train_dataset.set_task(task_id)\n val_dataset.set_task(task_id)\n test_dataset.set_task(task_id)", "def concatenate(tensors, axis=0):\n raise NotImplementedError", "def _write_to_dataset(parser1, parser2, dset, rundate):\n\n data_all1 = parser1.as_dict()\n data_all2 = parser2.as_dict()\n if parser1.file_path == parser2.file_path:\n collection = [data_all1]\n else:\n collection = [data_all1, data_all2]\n\n # Meta information\n dset.meta[\"tech\"] = \"slr\"\n dset.meta.add(\"file\", parser1.file_path.stem, section=\"input\")\n dset.meta.add(\"file\", parser2.file_path.stem, section=\"input\")\n dset.meta.add(\"type\", config.tech.obs_format.str.upper(), section=\"input\")\n\n # Make new dict \"obs_data\" containing only data in relevant time interval:\n arc_length = config.tech.arc_length.float\n rundate_datetime = datetime(rundate.year, rundate.month, rundate.day)\n obs_data = dict()\n for data_all in collection:\n for i, x in enumerate(data_all[\"meta\"][\"obs_time\"]):\n if rundate_datetime <= x < rundate_datetime + timedelta(days=arc_length):\n for key in (\"meta\", \"obs\", \"obs_str\"):\n for field, val in data_all[key].items():\n obs_data.setdefault(key, dict()).setdefault(field, list()).append(val[i])\n\n data_all.pop(\"meta\")\n data_all.pop(\"obs\")\n data_all.pop(\"obs_str\")\n\n for key in data_all.keys():\n if key.startswith(\"met_\"):\n for key2, val in data_all[key].items():\n obs_data.setdefault(key, dict()).setdefault(key2, list()).append(val)\n elif key.startswith(\"satellite_\"):\n # TODO: Use this information in the future?\n continue\n elif key.startswith(\"station_\"):\n # TODO: Use this information in the future?\n continue\n else:\n log.fatal(f\"Unknown data type{key}\")\n\n obs_date = obs_data[\"meta\"][\"obs_date\"]\n time = [obs_date[i] + timedelta(seconds=obs_data[\"meta\"][\"obs_sec\"][i]) for i in range(0, len(obs_date))]\n dset.num_obs = len(obs_data[\"meta\"][\"obs_time\"])\n dset.add_time(\"time\", val=time, scale=\"utc\", fmt=\"datetime\")\n dset.add_text(val=obs_data[\"meta\"][\"station\"], name=\"station\")\n dset.add_text(val=obs_data[\"meta\"][\"satellite\"], name=\"satellite\")\n dset.add_float(val=obs_data[\"meta\"][\"bin_rms\"], unit=\"picoseconds\", name=\"bin_rms\")\n # Positions\n trf = apriori.get(\"trf\", time=dset.time)\n for station in dset.unique(\"station\"):\n trf_site = trf[station]\n station_pos = trf_site.pos.trs.val\n log.debug(f\"Station position for {station} ({trf_site.name}) is (x,y,z) = {station_pos.mean(axis=0)}\")\n domes = trf_site.meta[\"domes\"]\n obs_data[\"pos_\" + station] = station_pos\n obs_data[\"station-other_\" + station] = dict(domes=domes, cdp=station, site_id=station)\n dset.add_position(\n \"site_pos\",\n time=dset.time,\n 
system=\"trs\",\n val=np.array([obs_data[\"pos_\" + s][idx] for idx, s in enumerate(dset.station)]),\n )\n # Station data\n sta_fields = set().union(*[v.keys() for k, v in obs_data.items() if k.startswith(\"station_\")])\n for field in sta_fields:\n dset.add_float(field, val=np.array([float(obs_data[\"station_\" + s][field]) for s in dset.station]))\n sta_fields = set().union(*[v.keys() for k, v in obs_data.items() if k.startswith(\"station-other_\")])\n for field in sta_fields:\n dset.add_text(field, val=[obs_data[\"station-other_\" + s][field] for s in dset.station])\n\n # Station meta\n station_keys = sorted([k for k, v in obs_data.items() if k.startswith(\"station-other_\")])\n pos_keys = sorted([k for k, v in obs_data.items() if k.startswith(\"pos_\")])\n\n for sta_key, pos_key in zip(station_keys, pos_keys):\n sta_name = sta_key.replace(\"station-other_\", \"\")\n cdp = obs_data[sta_key][\"cdp\"]\n dset.meta.add(sta_name, \"site_id\", cdp)\n longitude, latitude, height, _ = sofa.iau_gc2gd(2, obs_data[pos_key][0, :]) # TODO: Reference ellipsoid\n dset.meta[\"station\"].setdefault(sta_name, {})[\"cdp\"] = cdp\n dset.meta[\"station\"].setdefault(sta_name, {})[\"site_id\"] = cdp\n dset.meta[\"station\"].setdefault(sta_name, {})[\"domes\"] = obs_data[sta_key][\"domes\"]\n dset.meta[\"station\"].setdefault(sta_name, {})[\"marker\"] = \" \"\n dset.meta[\"station\"].setdefault(sta_name, {})[\"description\"] = \" \"\n dset.meta[\"station\"].setdefault(sta_name, {})[\"longitude\"] = longitude\n dset.meta[\"station\"].setdefault(sta_name, {})[\"latitude\"] = latitude\n dset.meta[\"station\"].setdefault(sta_name, {})[\"height\"] = height\n\n # Satellite data\n sat_fields = set().union(*[v.keys() for k, v in obs_data.items() if k.startswith(\"satellite_\")])\n for field in sat_fields:\n dset.add_float(field, val=np.array([float(obs_data[\"satellite_\" + s][field]) for s in dset.satellite]))\n\n # Observations\n # In the dataset, obs_time is seconds since rundate:\n v = [\n (obs_data[\"meta\"][\"obs_date\"][i] - rundate_datetime).total_seconds() + obs_data[\"meta\"][\"obs_sec\"][i]\n for i in range(0, dset.num_obs)\n ]\n\n obs_data[\"obs\"].pop(\"obs_time\")\n dset.add_float(\"obs_time\", val=v)\n for field, values in obs_data[\"obs\"].items():\n dset.add_float(field, val=np.array(values))\n\n for field, values in obs_data[\"obs_str\"].items():\n dset.add_text(field, val=values)\n\n return obs_data", "def combine_train_infer(train_file, infer_dir):\n\n train_df = pd.read_feather(train_file)\n\n time_range = range(len([f for f in os.listdir(infer_dir) if 'feather' in f]))\n infer_df_list = [pd.read_feather(f'{infer_dir}/{t}.feather') for t in time_range]\n\n comb_df_list = []\n train_df.index = [-1] * len(train_df)\n\n comb_df_list.append(train_df)\n\n for t in time_range:\n df = infer_df_list[t]\n df.index = [t] * len(df)\n\n comb_df_list.append(df)\n\n return pd.concat(comb_df_list), train_df, infer_df_list", "def merge_struct_arrays(self, data1, data2):\n data_final = np.concatenate((data1, data2))\n return data_final", "def _merge_datasets(dataset1, dataset2):\n\n # Number of labels in dataset 1\n _NUM_LABELS_D1 = len(np.unique(dataset1['labels']))\n\n # Number of labels in dataset 2\n _NUM_LABELS_D2 = len(np.unique(dataset2['labels']))\n\n # Call the optimization function to train on the first dataset and predict on the second dataset\n ds2_labels_using_ds1 = _optimization(dataset1, dataset2, nb_epochs=NUM_EPOCHS)\n\n # Initialize the label counting matrix\n label_counter = 
np.zeros(shape=(_NUM_LABELS_D2, _NUM_LABELS_D1))\n\n # Fill the label counting matrix accordingly\n for i in range(len(ds2_labels_using_ds1)):\n label_counter[int(dataset2['labels'][i]), int(ds2_labels_using_ds1[i])] += 1\n\n Matrix1 = np.matrix.copy(label_counter)\n\n # Initialize the new set of labels for dataset 2\n ds2_new_labels = np.zeros(shape=(len(ds2_labels_using_ds1), 2))\n\n # Determine the new labels for dataset 2\n for i in range(len(ds2_labels_using_ds1)):\n if dataset2['labels'][i] == np.argmax(label_counter[:, int(ds2_labels_using_ds1[i])]):\n ds2_new_labels[i, :] = np.array([ds2_labels_using_ds1[i], dataset2['labels'][i]])\n else:\n ds2_new_labels[i, :] = np.array([ds2_labels_using_ds1[i], -1])\n\n # Call the optimization function to train on the second dataset and predict on the first dataset\n ds1_labels_using_ds2 = _optimization(dataset2, dataset1, nb_epochs=NUM_EPOCHS)\n\n # Initialize the label counting matrix\n label_counter = np.zeros(shape=(_NUM_LABELS_D1, _NUM_LABELS_D2))\n\n # Fill the label counting matrix accordingly\n for i in range(len(ds1_labels_using_ds2)):\n label_counter[int(dataset1['labels'][i]), int(ds1_labels_using_ds2[i])] += 1\n\n Matrix2 = np.matrix.copy(label_counter.T)\n\n # Initialize the new set of labels for dataset 1\n ds1_new_labels = np.zeros(shape=(len(ds1_labels_using_ds2), 2))\n\n # Determine the new labels for dataset 1\n for i in range(len(ds1_labels_using_ds2)):\n if ds1_labels_using_ds2[i] == np.argmax(label_counter[int(dataset1['labels'][i]), :]):\n ds1_new_labels[i, :] = np.array([dataset1['labels'][i], ds1_labels_using_ds2[i]])\n else:\n ds1_new_labels[i, :] = np.array([dataset1['labels'][i], -1])\n\n # Concatenate all labels from both datasets\n all_labels = np.concatenate((ds1_new_labels, ds2_new_labels), axis=0)\n\n # Transform the tuple labels to scalar labels\n already_explored_rows = []\n\n label = 0\n\n vector_label = np.zeros(shape=(all_labels.shape[0], 1))\n\n for i in range(all_labels.shape[0]):\n if np.where((all_labels == all_labels[i, :]).all(axis=1))[0][0] not in already_explored_rows:\n rows = np.where((all_labels == all_labels[i, :]).all(axis=1))[0]\n vector_label[rows] = label\n label += 1\n for j in range(len(rows)):\n already_explored_rows.append(rows[j])\n\n vector_label = np.squeeze(vector_label)\n\n # One hot encoded version of the labels\n hot_labels = _one_hot_encode(vector_label, len(set(vector_label)))\n\n # Initialize the concatenated dataset\n new_dataset = {'labels': vector_label, 'hot_labels': hot_labels, 'actual_lengths': {}}\n\n # Fill the corresponding keys for the concatenated dataset\n for key in dataset1.keys():\n if (key != 'labels') and (key != 'hot_labels') and (key != 'actual_lengths'):\n new_dataset[key] = np.concatenate((dataset1[key], dataset2[key]), axis=0)\n if key == 'actual_lengths':\n for key2 in dataset1[key]:\n new_dataset[key][key2] = np.concatenate((dataset1[key][key2], dataset2[key][key2]), axis=0)\n\n # Return the merged dataset as a dictionary\n return new_dataset, Matrix1, Matrix2", "def data_list_wdl_merge(data_list1:list, data_list2:list) -> list:\n list_size = len(data_list1)\n merged_data_list = []\n for i in range(list_size):\n merged_data_list.append(pd.concat([data_list1[i],data_list2[i]]))\n return merged_data_list", "def get_dataset(self):\n\n trainset = datasets.SVHN('datasets/SVHN/train/', split='train', transform=self.train_transforms,\n target_transform=None, download=True)\n valset = datasets.SVHN('datasets/SVHN/test/', split='test', 
transform=self.val_transforms,\n target_transform=None, download=True)\n extraset = datasets.SVHN('datasets/SVHN/extra', split='extra', transform=self.train_transforms,\n target_transform=None, download=True)\n\n trainset = torch.utils.data.ConcatDataset([trainset, extraset])\n\n return trainset, valset", "def compute():\n dataset1 = 'project/data/dataset1.csv'\n dataset2 = \"project/data/dataset2.csv\"\n\n reader = CsvReader()\n\n data1 = reader.readCsv(dataset1)\n data2 = reader.readCsv(dataset2)\n\n database1 = DataBase(data1)\n database2 = DataBase(data2)\n\n Thread1 = threading.Thread(target=database1.fill, args= (1, ))\n Thread2 = threading.Thread(target=database2.fill, args= (2, ))\n\n\n Thread1.start()\n Thread2.start()", "def extend(self, other, adapt_conf=True):\n # Check if category metadata match\n if (self.size() > 0) and (other.size() > 0):\n for attr in [\"is_cat_inclusive\", \"is_categorised\"]:\n a, b = getattr(self, attr), getattr(other, attr)\n if a != b:\n raise ConcatenationError(\n f\"Categorisation metadata is different for '{attr}': {a} != {b}\"\n )\n elif other.size() > 0:\n for attr in [\"is_cat_inclusive\", \"is_categorised\"]:\n setattr(self, attr, getattr(other, attr))\n if getattr(self, \"tstep_h\", None) is None:\n self.tstep_h = getattr(other, \"tstep_h\", None)\n else:\n if getattr(other, \"tstep_h\", None) is not None:\n if self.tstep_h != other.tstep_h:\n raise ConcatenationError(\n \"Extending by a TrackRun with different timestep is not allowed\"\n )\n if adapt_conf and other.conf is not None:\n if self.conf is None:\n self.conf = other.conf.copy()\n else:\n for field in self.conf._fields:\n if getattr(self.conf, field) != getattr(other.conf, field):\n setattr(self.conf, field, None)\n self.sources.extend(other.sources)\n\n new_data = pd.concat([self.data, other.data], sort=False)\n new_track_idx = new_data.index.get_level_values(0).to_series()\n new_track_idx = new_track_idx.ne(new_track_idx.shift()).cumsum() - 1\n\n mux = pd.MultiIndex.from_arrays(\n [new_track_idx, new_data.index.get_level_values(1)], names=new_data.index.names\n )\n self.data = new_data.set_index(mux)\n\n # Concatenate categories\n if (self.cats is not None) or (other.cats is not None):\n new_cats = pd.concat([self.cats, other.cats], sort=False).fillna(False)\n new_track_idx = new_cats.index.get_level_values(0).to_series()\n new_track_idx = new_track_idx.ne(new_track_idx.shift()).cumsum() - 1\n\n ix = pd.Index(new_track_idx, name=new_cats.index.name)\n self.cats = new_cats.set_index(ix)", "def test_concat_data(self):\n\n this_satellite_dict = satellite_io.concat_data(\n satellite_dicts=[\n SATELLITE_DICT_SUBSET_BY_INDEX, SATELLITE_DICT_SUBSET_BY_TIME\n ]\n )\n\n self.assertTrue(compare_satellite_dicts(\n this_satellite_dict, SATELLITE_DICT_CONCAT\n ))", "def concat(self, other):\n self.add_rules(other.cliques)\n self.prop_names.update(other.prop_names)", "def add_datapoints(self, tasks):\n\n counters = {\n 'SUCCESS': 0,\n 'PENDING': 0,\n 'STARTED': 0,\n 'FAILURE': 0,\n 'RETRY' : 0,\n 'REVOKED': 0,\n 'RECEIVED': 0,\n };\n\n for task in tasks:\n counters[task[1]['state']] += 1\n\n for counter in counters:\n self.add_gauge_value('Tasks/' + counter.capitalize(), 'tasks', counters.get(counter, 0))", "def mergeNodes(new, t1, t2):\n \n if t1 and t2:\n new.val = t1.val + t2.val\n elif not t1:\n new.val = t2.val\n elif not t2:\n new.val = t1.val", "def merge(self, other, allow_duplicate=False, do_spaces=True, do_datasets=True, do_tasksets=True, do_results=True):\n #TODO: May need to organize 
a staging area to ensure this merge is atomic\n if self.mode == 'r': raise ValueError, \"Cannot merge into read-only store\"\n ignored_md = ['uuid', 'avg_learn', 'avg_classify', 'name', 'feature_name', 'class_name']\n\n space_direct_copy = [] # Spaces we copy directly, meaning the featuremap can be copied too\n space_feature_mapping = {}\n if do_spaces or do_datasets:\n # Must do spaces if we do datasets, because spaces may have been updated\n for space_node in ProgressIter(list(other.spaces), label='Copying spaces'):\n logger.debug(\"Considering space '%s'\", space_node._v_name)\n space_name = space_node._v_name\n if hasattr(self.spaces, space_name):\n logger.debug('Already had %s', space_name)\n src_space = other.get_Space(space_name)\n # Need to merge these. Feature spaces can be extended, but there is no mechanism for doing the same with class\n # spaces at the moment, so we must reject any that do not match. \n dst_space = self.get_Space(space_name)\n if src_space == dst_space:\n logger.debug(' Exact match')\n space_direct_copy.append(space_name)\n else:\n md = get_metadata(space_node)\n if md['type'] == 'class':\n raise ValueError, \"Cannot merge due to different versions of %s\" % str(md)\n elif md['type'] == 'feature':\n logger.debug(' Attempting to merge %s', str(md))\n # Reconcile the spaces. \n ## First we need to compute the new features to add\n new_feats = sorted(set(src_space) - set(dst_space))\n logger.debug(' Identified %d new features', len(new_feats))\n reconciled_space = dst_space + new_feats\n if len(new_feats) != 0:\n # Only need to extend if new features are found.\n self.extend_Space(space_name, reconciled_space)\n ## Now we need to build the mapping from the external space to ours\n space_index = dict( (k,v) for v,k in enumerate(reconciled_space))\n space_feature_mapping[space_name] = dict( (i,space_index[s]) for i,s in enumerate(src_space))\n else:\n raise ValueError, \"Unknown type of space\"\n else:\n self.fileh.copyNode(space_node, newparent=self.spaces)\n space_direct_copy.append(space_name)\n \n if do_datasets:\n for src_ds in ProgressIter(list(other.datasets), label='Copying datasets'):\n dsname = src_ds._v_name\n\n logger.debug(\"Considering dataset '%s'\", dsname)\n if hasattr(self.datasets, dsname):\n logger.warning(\"already had dataset '%s'\", dsname)\n dst_ds = getattr(self.datasets, dsname)\n # Failure to match instance_id is an immediate reject\n if dst_ds._v_attrs.instance_space != src_ds._v_attrs.instance_space:\n raise ValueError, \"Instance identifiers don't match for dataset %s\" % dsname\n # The hardest to handle is the feature data, since we may need to rearrange feature maps\n else:\n instance_space = other.get_DatasetMetadata(dsname)['instance_space']\n self.add_Dataset(dsname, instance_space, other.get_Space(dsname))\n dst_ds = getattr(self.datasets, dsname)\n\n node_names = ['class_data', 'sequence', 'tokenstreams']\n for name in node_names:\n logger.debug('Copying %s',name)\n if hasattr(src_ds, name):\n src_parent = getattr(src_ds, name)\n #TODO: may need to handle incomplete destination nodes\n dst_parent = getattr(dst_ds, name)\n for node in src_parent:\n if hasattr(dst_parent, node._v_name):\n logger.warning(\"already had '%s' in '%s'\", node._v_name, name)\n else:\n self.fileh.copyNode(node, newparent=dst_parent, recursive=True)\n else:\n logger.warning(\"Source does not have '%s'\", name)\n\n logger.debug('Copying feature_data')\n for node in src_ds.feature_data:\n space_name = node._v_name\n if hasattr(dst_ds.feature_data, 
space_name):\n logger.warning(\"already had '%s' in 'feature_data'\", space_name) \n elif space_name in space_direct_copy:\n # Direct copy the feature data because the destination store did not have this\n # space or had exactly this space\n logger.debug(\"direct copy of '%s' in 'feature_data'\", space_name)\n self.fileh.copyNode(node, newparent=dst_ds.feature_data, recursive=True)\n else:\n ax0 = node.feature_map.read(field='ax0')\n ax1 = node.feature_map.read(field='ax1')\n value = node.feature_map.read(field='value')\n feature_mapping = space_feature_mapping[space_name]\n\n feat_map = [ (i,feature_mapping[j],v) for (i,j,v) in zip(ax0,ax1,value)]\n self.add_FeatureDict(dsname, space_name, feat_map)\n\n \n # TASKS & RESULTS\n def __merge(datum, check):\n logger.debug(\"Copying %s\", datum)\n src_node = getattr(other, datum)\n dst_node = getattr(self, datum)\n for t in ProgressIter(list(src_node), label='Copying %s' % datum):\n logger.debug(\"Considering %s '%s'\", datum, t._v_name)\n\n # Check if the exact result has been previously copied\n if t._v_name in dst_node:\n logger.warn(\"Skipping previous %s: %s\", datum, t._v_name)\n else:\n md = get_metadata(t)\n for i in ignored_md: \n if i in md: \n del md[i]\n # Check for equivalent metadata\n if not allow_duplicate and check(md):\n logger.warn(\"Ignoring duplicate in %s: %s\", datum, str(md))\n else:\n try:\n self.fileh.copyNode(t, newparent=dst_node, recursive=True)\n except tables.NoSuchNodeError:\n logger.critical(\"Damaged node skipped\")\n\n if do_tasksets:\n # Copy entire nodes\n __merge('tasksets', self.has_TaskSet)\n # Now work our way through and check if any weights need updating\n for src in ProgressIter(other.get_TaskSets({}), label='Copying weights'):\n if src.node._v_name in self.tasksets:\n dst = StoredTaskSet(self, getattr(self.tasksets, src.node._v_name))\n else:\n md = dict(src.metadata)\n for i in ignored_md: \n if i in md: \n del md[i]\n dst = self.get_TaskSet(md)\n # sanity check for compatibility\n if len(src.tasks) != len(dst.tasks):\n logger.warning('number of tasks in src and dst do not match; skipping')\n continue\n for i, task in enumerate(src.tasks):\n dst.tasks[i].weights.update(src.tasks[i].weights)\n\n if do_results:\n __merge('results', self.has_TaskSetResult)", "def add(self, other):\n if not isinstance(other, self.__class__):\n raise ValueError(\n f\"Argument (type {type(other)}) is not a {self.__class__} instance\"\n )\n if len(other.data):\n self.data = pd.concat([self.data, other.data], ignore_index=True)\n self.sort()", "def get_datasets_for_tasklist(tasks):\n task_ids = [task['jeditaskid'] for task in tasks if 'jeditaskid' in task]\n\n query = {'type__in': ['pseudo_input', 'input', 'output']}\n extra_str = '1=1'\n if len(tasks) > settings.DB_N_MAX_IN_QUERY:\n # insert ids to tmp table, backend dependable\n tk = insert_to_temp_table(task_ids)\n extra_str = \"JEDITASKID in (SELECT ID FROM {} WHERE TRANSACTIONKEY={})\".format(get_tmp_table_name(), tk)\n else:\n query['jeditaskid__in'] = task_ids\n\n dsets = JediDatasets.objects.filter(**query).extra(where=[extra_str]).values()\n\n dsets_dict = {}\n for ds in dsets:\n if ds['jeditaskid'] not in dsets_dict:\n dsets_dict[ds['jeditaskid']] = []\n dsets_dict[ds['jeditaskid']].append(ds)\n\n for task in tasks:\n task['datasets'] = dsets_dict[task['jeditaskid']] if task['jeditaskid'] in dsets_dict else []\n\n return tasks", "def transpose_load_concat(self, **kwargs):\n if self.mask_train:\n datamask=self.add_mask()\n thedatas={}\n for key, value in 
kwargs.items():\n if not self.mask_train:\n thedatas[key]=value.X_train.transpose('a','x','y','features').values\n if self.mask_train:\n temp=value.X_train.transpose('a','x','y','features').values\n thedatas[key]=np.where(np.repeat(datamask['X_train'].values[...,np.newaxis], 4, axis=3)==0, 0, temp)\n label=value.X_train_label.values\n if len(kwargs) > 1:\n X_train=np.concatenate(list(thedatas.values()), axis=3)\n if len(kwargs)==1:\n X_train=np.squeeze(np.asarray(list(thedatas.values())))\n return X_train, label", "def task1(self):\n \n pass", "def get_task_flow_data(jeditaskid):\n data = []\n # get datasets\n datasets = []\n dquery = {'jeditaskid': jeditaskid, 'type__in': ['input', 'pseudo_input'], 'masterid__isnull': True}\n datasets.extend(JediDatasets.objects.filter(**dquery).values('jeditaskid', 'datasetname', 'type'))\n\n dataset_dict = {}\n for d in datasets:\n dname = d['datasetname'] if ':' not in d['datasetname'] else d['datasetname'].split(':')[1]\n dataset_dict[dname] = {'replica': {}, 'jobs': {}}\n\n # get jobs aggregated by status, computingsite and proddblock (input dataset name)\n jobs = []\n jquery = {'jeditaskid': jeditaskid, 'prodsourcelabel__in': ['user', 'managed'], }\n extra_str = \"( processingtype not in ('pmerge') )\"\n jvalues = ['proddblock', 'computingsite', 'jobstatus']\n jobs.extend(Jobsarchived4.objects.filter(**jquery).extra(where=[extra_str]).values(*jvalues).annotate(njobs=Count('pandaid')))\n jobs.extend(Jobsarchived.objects.filter(**jquery).extra(where=[extra_str]).values(*jvalues).annotate(njobs=Count('pandaid')))\n jobs.extend(Jobsactive4.objects.filter(**jquery).extra(where=[extra_str]).values(*jvalues).annotate(njobs=Count('pandaid')))\n jobs.extend(Jobsdefined4.objects.filter(**jquery).extra(where=[extra_str]).values(*jvalues).annotate(njobs=Count('pandaid')))\n jobs.extend(Jobswaiting4.objects.filter(**jquery).extra(where=[extra_str]).values(*jvalues).annotate(njobs=Count('pandaid')))\n\n if len(jobs) > 0:\n for j in jobs:\n if len(j['proddblock']) > 0:\n dname = j['proddblock'] if ':' not in j['proddblock'] else j['proddblock'].split(':')[1]\n else:\n dname = next(iter(dataset_dict)) if len(dataset_dict) > 0 else 'pseudo_dataset'\n if j['computingsite'] is not None and j['computingsite'] != '':\n if j['computingsite'] not in dataset_dict[dname]['jobs']:\n dataset_dict[dname]['jobs'][j['computingsite']] = {}\n job_state = j['jobstatus'] if j['jobstatus'] in const.JOB_STATES_FINAL else 'active'\n if job_state not in dataset_dict[dname]['jobs'][j['computingsite']]:\n dataset_dict[dname]['jobs'][j['computingsite']][job_state] = 0\n dataset_dict[dname]['jobs'][j['computingsite']][job_state] += j['njobs']\n\n # get RSE for datasets\n replicas = []\n if len(datasets) > 0:\n dids = []\n for d in datasets:\n if d['type'] == 'input':\n did = {\n 'scope': d['datasetname'].split(':')[0] if ':' in d['datasetname'] else d['datasetname'].split('.')[0],\n 'name': d['datasetname'].split(':')[1] if ':' in d['datasetname'] else d['datasetname'],\n }\n dids.append(did)\n\n rw = ruciowrapper()\n replicas = rw.getRSEbyDID(dids)\n\n if replicas is not None and len(replicas) > 0:\n for r in replicas:\n if r['name'] in dataset_dict:\n dataset_dict[r['name']]['replica'][r['rse']] = {\n 'state': r['state'],\n 'available_pct': round(100.0 * r['available_length']/r['length'], 1) if r['length'] > 0 else 0\n }\n\n # transform data for plot and return\n return executeTF({'data': {'datasets': dataset_dict, } })", "def merge(self , station = '' , datasets = ''):\n \n \n \n a = 
self.initialize_data( station = station, datasets = datasets ) # reading the input files \n dummy = self.merge_all_data() \n logging.info('*** Finished merging, now writing the output netCDF file ***' ) \n a = self.write_merged_file()\n logging.info('*** Done writing the output ! ***')\n return True\n \n \n \"\"\"\n try:\n a = self.initialize_data( station = station, datasets = datasets ) # reading the input files \n dummy = self.merge_all_data() \n logging.info('*** Finished merging, now writing the output netCDF file ***' ) \n a = self.write_merged_file()\n logging.info('*** Done writing the output ! ***')\n return True\n except:\n print('Failed: ' , station )\n return False \n \"\"\"", "def datasets_for_task(jeditaskid):\n dsets = []\n dsquery = {\n 'jeditaskid': jeditaskid,\n }\n values = (\n 'jeditaskid', 'datasetid', 'datasetname', 'containername', 'type', 'masterid', 'streamname', 'status',\n 'storagetoken', 'nevents', 'neventsused', 'neventstobeused', 'nfiles', 'nfilesfinished', 'nfilesfailed',\n 'nfilesmissing', 'nfileswaiting'\n )\n values = list(set(values) & set([f.name for f in JediDatasets._meta.get_fields()]))\n dsets.extend(JediDatasets.objects.filter(**dsquery).values(*values))\n\n dsets, dsinfo = calculate_dataset_stats(dsets)\n dsets = sorted(dsets, key=lambda x: x['datasetname'].lower())\n\n return dsets, dsinfo", "def make_dataset(arguments):\n connections.create_connection(hosts=ES_HOSTS, timeout=9999, http_auth=ES_LOGIN)\n time_start = time.time()\n\n # cleanup of the invalid dataset duplicate links\n if arguments.clean:\n dataset_source_cleanup()\n\n # reset dataset assignments of posts with given roles\n if arguments.reset is not None and len(arguments.reset) > 0:\n reset_dataset_flags(arguments.reset)\n\n # create dataset base from main posts and their duplicates\n if arguments.base:\n link_search = PostLink.search().filter(\"term\", link_type=3).params(scroll=\"1440m\")\n links = link_search.scan()\n\n time_start_partial = time.time()\n print(\"Creating dataset base from duplicates ...\")\n for i, link in enumerate(links):\n add_main_post_into_ds(Post.get_post(link.post_ID, link.page))\n if i % 10 == 0:\n print(f\"\\r Processing - {i}\", end=\"\")\n time_partial = time.time() - time_start_partial\n print(f\" {int(time_partial / 60)} min, {int(time_partial % 60)} s\")\n print()\n\n # search and assign similar posts to all main posts in dataset\n if arguments.similar is not None:\n time_start_partial = time.time()\n print(\"Getting similar posts for posts in dataset base ...\")\n print(f\" Part: {arguments.similar}\")\n pool = Pool(PARALLEL_SLICES)\n pool.map(process_similar_posts, range(PARALLEL_SLICES))\n\n time_partial = time.time() - time_start_partial\n print(f\" {int(time_partial / 60)} min, {int(time_partial % 60)} s\")\n print()\n\n # export the dataset to CSV file\n if arguments.export:\n time_start_partial = time.time()\n print(\"Exporting whole dataset to general csv...\")\n export_dataset_to_csv()\n time_partial = time.time() - time_start_partial\n print(f\" {int(time_partial / 60)} min, {int(time_partial % 60)} s\")\n\n time_start_partial = time.time()\n print(\"Shuffling and splitting the general csv into train, dev and test parts...\")\n shuffle_and_split(DS_EXPORT_FILE)\n time_partial = time.time() - time_start_partial\n print(f\" {int(time_partial / 60)} min, {int(time_partial % 60)} s\")\n\n time_end = time.time()\n time_total = time_end - time_start\n print(\"Dataset created successfully ...\")\n print(f\"Dataset creation process took 
{int(time_total / 60)} min and {int(time_total % 60)} seconds\")", "def task2(self):\n\n pass", "def test_parallel_dataflow():\n\n if os.path.exists('all.txt'):\n os.remove('all.txt')\n\n # create 5 files with random numbers\n output_files = []\n for i in range(5):\n if os.path.exists('random-%s.txt' % i):\n os.remove('random-%s.txt' % i)\n output_files.append(generate(outputs=[File('random-%s.txt' % i)]))\n\n # concatenate the files into a single file\n cc = concat(inputs=[i.outputs[0]\n for i in output_files], outputs=[File(\"all.txt\")])\n\n # calculate the average of the random numbers\n totals = total(inputs=[cc.outputs[0]])\n print(totals.result())", "def dataset()-> pd.DataFrame:\n old= pd.read_csv(DIRECT_OLD)\n new= pd.read_csv(DIRECT_NEW)\n return pd.concat([old, new])", "def concatenate_datasets(filenames_list, img_rows=128, img_cols=128):\n print('Concatenating the datasets created by data augmentation into a single one')\n print('Using the following pairs of images / masks datasets: ')\n print(filenames_list)\n print('\\n')\n\n # total number of images\n n_samples = 600 * len(filenames_list)\n\n # create np.ndarrays for the images and the targets: xCenter, yCenter, xOrientation, yOrientation\n images_dataset = np.ndarray((n_samples, img_rows, img_cols), dtype=np.uint8)\n targets_dataset = np.ndarray((n_samples, 4), dtype=np.float32)\n\n for ds, (img, mask) in enumerate(filenames_list):\n print(\" Processing {}\".format(img))\n images = np.load(\"output/augmented_data/{}.npy\".format(img))\n masks = np.load(\"output/augmented_data/{}.npy\".format(mask))\n\n for idx, mat in enumerate(masks):\n\n # get the center coordinates of the left ventricle (on the resized image)\n row, col = findCenter(img=mat, pixelvalue=1)\n\n # get the orientation of the left ventricle (on the resized image)\n x_v1, y_v1 = findMainOrientation(img=mat, pixelvalue=1)\n\n # save the center coordinates & orientation to the y dataframe (which will be the output of the network)\n targets_dataset[ds*600 + idx] = np.array([row, col, x_v1, y_v1])\n\n # save image in main dataset file\n images_dataset[ds*600 + idx] = images[idx]\n\n print('Concatenated all datasets into one & created target values for (center, orientation)')\n\n print('Splitting the dataset into 70% training & 30% testing')\n images_train, images_test, targets_train, targets_test = train_test_split(images_dataset, targets_dataset,\n test_size=0.3,\n random_state=42,\n shuffle=True)\n\n # save all ndarrays to a .npy files (for faster loading later)\n # Create directory to store files.\n directory = os.path.join(os.getcwd(), 'output/processed_data/')\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # save training set to file\n np.save('output/processed_data/images_train.npy', images_train)\n np.save('output/processed_data/targets_train.npy', targets_train)\n\n # save testing set to file\n np.save('output/processed_data/images_test.npy', images_test)\n np.save('output/processed_data/targets_test.npy', targets_test)\n print('Saving to .npy files done. 
See files: ')\n print('output/processed_data/images_train.npy')\n print('output/processed_data/targets_train.npy')\n print('output/processed_data/images_test.npy')\n print('output/processed_data/targets_test.npy')", "def mergeWith(self, others):", "def get_combined_data(self, file_path: str, train_file_name: str,\n test_file_name: str) -> pd.DataFrame:\n train_data=self.load_dataset(file_path,train_file_name)\n train_data=train_data.drop('Survived', 1)\n test_data=self.load_dataset(file_path,test_file_name)\n\n combined_data = train_data.append(test_data)\n combined_data.reset_index(inplace=True)\n combined_data.drop('index', inplace=True, axis=1)\n\n return combined_data", "def convert_concat(self, op):\n try:\n from tflite.Operator import Operator\n from tflite.ConcatenationOptions import ConcatenationOptions\n from tflite.BuiltinOptions import BuiltinOptions\n from tflite.ActivationFunctionType import ActivationFunctionType\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n\n assert isinstance(op, Operator)\n input_tensors = self.get_input_tensors(op)\n assert len(input_tensors) > 1, \"input tensors length should be greater than 1\"\n\n data_nodes = [self.tensor_tab[t.tensor_idx] for t in input_tensors]\n\n output_tensors = self.get_output_tensors(op)\n assert len(output_tensors) == 1, \"output tensors length should be 1\"\n output_tensor = output_tensors[0]\n output_tensor_idx = output_tensor.tensor_idx\n output_tensor_shape = output_tensor.tensor.ShapeAsNumpy()\n\n assert op.BuiltinOptionsType() == BuiltinOptions.ConcatenationOptions\n op_options = op.BuiltinOptions()\n concat_options = ConcatenationOptions()\n concat_options.Init(op_options.Bytes, op_options.Pos)\n concat_dim = concat_options.Axis()\n fused_activation_fn = concat_options.FusedActivationFunction()\n assert fused_activation_fn == ActivationFunctionType.NONE, \\\n 'Concat operator with fused activation is not supported yet.'\n\n out_nodes = self.nn_concat(concat_dim, data_nodes, output_tensor_shape)\n\n self.tensor_tab[output_tensor_idx] = out_nodes\n return out_nodes", "def mergeFile():\n with open(\"output.txt\",'w') as o:\n o.write(data1)\n o.write(data2)\n o.write(data3)", "def mergeThreads ():\n \n print \"merge threads starts\"\n data = np.zeros((1, 6))\n for threadID in xrange(0, 4):\n filename = \"TOF_nt_TOF_t%d.csv\"%(threadID)\n dataThread = np.loadtxt(filename, delimiter = \",\", skiprows = 9)\n data = np.vstack((data, dataThread))\n savefmt = [\"%.5g\", \"%.5g\", \"%.5g\", \"%d\", \"%d\", \"%d\"]\n np.savetxt(\"TOFfile.dat\", data[1:, :], fmt = savefmt)\n print \"merge threads finished\"", "def combine_data_main(data1,data2,lookup,foutput):\n\n # Get the maximum number of ortholog probesets we'll have to append\n max_orthologs = 0\n for probe_set_id in data1.keys():\n max_orthologs = max(max_orthologs,len(lookup(probe_set_id)))\n logging.debug(\"Max_orthologs = %d\" % max_orthologs)\n \n # Write header line\n line = [data1.header()]\n for i in range(1,max_orthologs+1):\n logging.debug(\"Adding header set #%d\" % i)\n for item in data2.header().split('\\t'): line.append(\"%s_%s\" % (item,i))\n foutput.write(\"%s\\n\" % '\\t'.join(line))\n\n # Append data\n for probe_set_id in data1.keys():\n # Build line to output to file\n line = [data1.fetch(probe_set_id)]\n # Get the corresponding ortholog probe set ID(s)\n logging.debug(\"Processing probe set ID %s\" % probe_set_id)\n for ortholog_probe_set_id in lookup(probe_set_id):\n ortholog_data = 
data2.fetch(ortholog_probe_set_id)\n if ortholog_data is not None:\n line.append(ortholog_data)\n # Write line to file\n foutput.write(\"%s\\n\" % '\\t'.join(line))", "def data_unification(self, data1, data2):\r\n data = data1 + data2\r\n return data", "def merge_arrival_and_completion_time(tests_dataframe):\r\n arrival_time_df = tests_dataframe[['time_test_arrives_lab', 'server_size']]\r\n completion_time_df = tests_dataframe[['completion_time', 'server_size']]\r\n arrival_time_df['add'] = 1\r\n completion_time_df['add'] = -1\r\n arrival_time_df = arrival_time_df.rename(columns={\"time_test_arrives_lab\":\"time\"})\r\n completion_time_df = completion_time_df.rename(columns={\"completion_time\":\"time\"})\r\n union = pd.concat([arrival_time_df, completion_time_df])\r\n union = union.sort_values(by=\"time\")\r\n prev_server_size = 0\r\n for index, row in union.iterrows():\r\n if index == 0:\r\n current_server_size= row['server_size'] + row['add']\r\n prev_server_size = current_server_size\r\n #union['server_size'] = union['server_size'] + union['add']\r\n else:\r\n current_server_size = prev_server_size + row['add'] \r\n prev_server_size = current_server_size\r\n union.at[index,'server_size'] = current_server_size\r\n #union.to_csv('union.csv')\r\n return union", "def _concat_arrays(arrays):\n # torch\n if isinstance(arrays[0], torch.Tensor):\n return torch.cat(arrays)\n\n # numpy\n if not isinstance(arrays[0], np.ndarray):\n arrays = np.asarray(arrays)\n\n return np.concatenate(arrays)", "def concatenate_images(img_1, img_2):\n res_4 = None;\n if not (img_1 is None):\n # Resize Camera and Satellite Image:\n res_1 = cv2.resize(img_2, None,fx=0.7, fy=0.7, interpolation = cv2.INTER_CUBIC)\n res_2 = cv2.resize(img_1, None,fx=0.7, fy=0.7, interpolation = cv2.INTER_CUBIC)\n\n #Concatenate Camera and Satellite view on single image\n h_1 = res_1.shape[0];\n w_1 = res_1.shape[1];\n h_2 = res_2.shape[0];\n w_2 = res_2.shape[1];\n scale = float(h_1)/float(h_2);\n\n h_2 = h_1;\n w_2 = int(w_2*scale)\n dim = (w_2, h_2);\n res_3 = cv2.resize(res_2, dim, interpolation = cv2.INTER_CUBIC)\n\n res_4 = np.concatenate((res_1, res_3), axis=1)\n\n return res_4;", "def setup_datasets(self):\r\n\r\n train_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.RandomRotation(degrees=self.random_angle, resample=Image.BILINEAR),\r\n transforms.RandomResizedCrop(\r\n size=self.crop_size, scale=(1-self.random_scale, 1+self.random_scale), ratio=(1, 1)),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n val_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.CenterCrop(self.crop_size),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n\r\n train_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='train2014',\r\n transform=train_transform,\r\n dataset_size_ratio=self.dataset_size_ratio\r\n )\r\n train_subset_dataset = Subset(train_dataset, range(0, len(train_dataset), 5*self.dataset_size_ratio))\r\n val_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='val2014',\r\n transform=val_transform,\r\n )\r\n\r\n train_loader = DataLoader(\r\n train_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=True,\r\n num_workers=self.num_workers\r\n )\r\n train_subset_loader = DataLoader(\r\n 
train_subset_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n val_loader = DataLoader(\r\n val_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n return train_loader, train_subset_loader, val_loader", "def concatenate(array1, array2, axis=0):\r\n\r\n assert isinstance(array2, numpy.ndarray)\r\n if array1 is not None:\r\n assert isinstance(array1, numpy.ndarray)\r\n return numpy.concatenate((array1, array2), axis=axis)\r\n else:\r\n return array2", "def generate_coco_dataset_sub(args, idx1, idx2, cat):\n\tdata_path = args.data_root / '{}2017'.format(idx1)\n\tanno_path = args.data_root / 'annotations/instances_{}2017.json'.format(idx1)\t# eg. anno_path is \"datasets/COCO/annotations/instances_train2017.json\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# or \"datasets/COCO/annotations/instances_val2017.json\"\n\tcoco = COCO(anno_path) # COCO API\n\n\n\timg_path = args.save_root / '{}{}'.format(idx1, idx2)\t\t# eg. img_path is \"datasets/shp2gir_coco/trainA\" or \"datasets/shp2gir_coco/trainB\"\n\tseg_path = args.save_root / '{}{}_seg'.format(idx1, idx2)\t# eg. img_path is \"datasets/shp2gir_coco/trainA_seg\" or \"datasets/shp2gir_coco/trainB_seg\"\n\timg_path.mkdir()\t\t\t\t\t\t\t\t\t\t\t# they are empty, therefore mkdir()s\n\tseg_path.mkdir()\n\n\tcat_id = coco.getCatIds(catNms=cat)\t\t# cat is \"sheep\" or \"giraffe\",get the category's id\n\timg_id = coco.getImgIds(catIds=cat_id)\t# get the ids of sheep/giraffe images,获得所有绵羊的图片id,或者所有长颈鹿的图片id\n\timgs = coco.loadImgs(img_id)\t\t\t# 获得所有绵羊的图片(很多张),或者所有长颈鹿的图片\n\n\t# tqdm表示进度条,progress\n\t# refer:https://tqdm.github.io/\n\tpb = tqdm(total=len(imgs))\n\tpb.set_description('{}{}'.format(idx1, idx2))\n\tfor img in imgs:\n\t\tann_ids = coco.getAnnIds(imgIds=img['id'], catIds=cat_id)\t# get annotation'id\n\t\tanns = coco.loadAnns(ann_ids)\t\t\t\t\t\t\t\t# get the annotation(many)\n\n\t\tcount = 0\n\t\tfor i in range(len(anns)):\t\t\t\t# 真正从标签生成mask的地方。\n\t\t\tseg = coco.annToMask(anns[i])\t\t# annotation to mask, the type is array now\n\t\t\tseg = Image.fromarray(seg * 255)\t# turn the seg array to seg image,each pix multi 255. 
why?\n\t\t\tseg = resize(seg, args.image_size)\t# resize the seg image\n\t\t\t# np.sum\n\t\t\tif np.sum(np.asarray(seg)) > 0:\t\t\t\t\t\t\t\t# 保存seg\n\t\t\t\tseg.save(seg_path / '{}_{}.png'.format(pb.n, count))\t# pb.n 表示?\n\t\t\t\tcount += 1\n\n\t\tif count > 0: # at least one instance exists\n\t\t\timg = Image.open(data_path / img['file_name'])\n\t\t\timg = resize(img, args.image_size)\n\t\t\timg.save(img_path / '{}.png'.format(pb.n))\n\n\t\tpb.update(1)\n\tpb.close()", "def datasets(self):\n pass", "def add_task(self, task): \n self.buffer = np.vstack((self.buffer, task))\n return self.buffer", "def load_results(task):\n\n ALGRESULTS = \"./results/task%d_formulas.csv.gz\" % (task)\n MLRESULTS = \"./results/task%d_ml.csv.gz\" % (task)\n\n dftest = pd.read_csv(\"./dftest_task%d.csv\" % (task))\n\n dfalg = pd.read_csv(ALGRESULTS)\n dfml = pd.read_csv(MLRESULTS)\n dfnn = get_nns(task)\n dfml = dfml.rename(columns={\"Unnamed: 0\":\"algs\"})\n\n merged = pd.merge(dfalg, dfml, on=[\"mesaid\",\"linetime\",\"actValue\",\"gt\",\"gt_sleep_block\"]) #\n merged = pd.merge(merged, dfnn, on=[\"mesaid\",\"linetime\",\"actValue\",\"gt\",\"gt_sleep_block\"]) #\n merged = pd.merge(merged, dftest, on=[\"mesaid\",\"linetime\",\"gt\",\"gt_sleep_block\"]) #\n\n merged[\"time\"] = pd.to_datetime(merged[\"linetime\"])\n merged[\"always1\"] = 1\n merged[\"always0\"] = 0\n\n merged[\"sleep\"] = (~merged[\"wake\"].astype(np.bool)).astype(float)\n return merged", "def combine_all(self):\n if self._train_only:\n return\n\n combined = copy.deepcopy(self.train)\n\n # relabel pids in gallery (query shares the same scope)\n g_pids = set()\n for items in self.gallery:\n pid = items[1]\n if pid in self._junk_pids:\n continue\n g_pids.add(pid)\n pid2label = {pid: i for i, pid in enumerate(g_pids)}\n\n def _combine_data(data):\n for img_path, pid, camid, dsetid in data:\n if pid in self._junk_pids:\n continue\n pid = pid2label[pid] + self.num_train_pids\n combined.append((img_path, pid, camid, dsetid))\n\n _combine_data(self.query)\n _combine_data(self.gallery)\n\n self.train = combined\n self.num_train_pids = self.get_num_pids(self.train)", "def _copyDataSetsForTube(self):\n\n # Get the datasets for the well\n dataSets = self._getDataSetForTube()\n\n # Get all fcs files for the datasets\n dataSetFiles = self._getFilesForDataSets(dataSets)\n if len(dataSetFiles) == 0:\n self._message = \"Could not retrieve files for datasets from tube.\"\n self._logger.error(self._message)\n return False\n\n # Store at the experiment level\n self._currentPath = self._experimentPath\n\n # Copy the files\n for fcsFile in dataSetFiles:\n self._copyFile(fcsFile, self._currentPath)\n\n # Return success\n return True", "def merge_two_calls(self) -> None:", "def read_combined_data(train_inputs, train_labels, train_sad, val_inputs, val_labels, val_sad):\n # combine all subsets into one and shuffle\n train_inputs_all, train_labels_all, val_inputs_all, val_labels_all, train_sad_all, val_sad_all = \\\n (dict() for _ in range(6))\n for block in train_inputs:\n train_inputs_all[block], train_labels_all[block], train_sad_all[block] = \\\n concatenate_dictionary_keys(train_inputs[block], train_labels[block], train_sad[block])\n val_inputs_all[block], val_labels_all[block], val_sad_all[block] = \\\n concatenate_dictionary_keys(val_inputs[block], val_labels[block], val_sad[block])\n\n # shuffle the triples\n train_inputs_all[block], train_labels_all[block], train_sad_all[block] = \\\n array_shuffle(len(train_sad_all[block]),\n train_inputs_all[block], 
train_labels_all[block], train_sad_all[block])\n val_inputs_all[block], val_labels_all[block], val_sad_all[block] = \\\n array_shuffle(len(val_sad_all[block]), val_inputs_all[block], val_labels_all[block], val_sad_all[block])\n\n return train_inputs_all, train_labels_all, train_sad_all, val_inputs_all, val_labels_all, val_sad_all", "def create_task_alias(self, data, features):\n data = data.to_dict('records')\n subsec_set = set()\n if isinstance(features, list):\n task_list = [(x[features[0]], x[features[1]]) for x in data]\n else:\n task_list = [x[features] for x in data]\n [subsec_set.add(x) for x in task_list]\n variables = sorted(list(subsec_set))\n characters = string.ascii_letters+string.digits\n # characters = [chr(i) for i in range(0, len(variables))]\n aliases = random.sample(characters, len(variables))\n alias = dict()\n for i, _ in enumerate(variables):\n alias[variables[i]] = aliases[i]\n return alias", "def collect_dataset_experiment_results(ray_task_list: list):\n res_list = []\n total_jobs = len(ray_task_list)\n logger.info('Collecting jobs. total_jobs={}'.format(total_jobs))\n for job_num in range(total_jobs):\n t1 = time.time()\n ready_id, ray_task_list = ray.wait(ray_task_list)\n res_i = ray.get(ready_id[0])\n res_list.append(res_i)\n\n # Report\n dataset_name = res_i['dataset_name'][0]\n n_trainset, n_valset, n_testset = res_i['trainset_size'][\n 0], res_i['valset_size'][0], res_i['testset_size'][0]\n logger.info('[{:04d}/{}] {}. Size [train val test]=[{:03d} {} {}] in {:3.1f}s.'.format(\n job_num, total_jobs - 1, dataset_name, n_trainset, n_valset, n_testset, time.time() - t1))\n\n # Save to file\n res_df = pd.concat(res_list, ignore_index=True, sort=False)\n res_df = res_df.sort_values(by=['dataset_name', 'num_features', 'trainset_size', 'split'],\n ascending=[False, True, True, True])\n return res_df", "def test_create_two_independent_tasks():\n task1 = Task(\"print book\",\"Fabio\",True,1)\n task2 = Task(\"buy book\", \"Elisa\", False, 2)\n\n assert task1.summary != task2.summary" ]
[ "0.685715", "0.6508406", "0.6459176", "0.63558143", "0.63046694", "0.617627", "0.6152155", "0.5977995", "0.58850974", "0.58077186", "0.5803393", "0.57910776", "0.5675971", "0.5674531", "0.5653291", "0.5594489", "0.55606794", "0.55558187", "0.55526024", "0.5524445", "0.5524164", "0.55203766", "0.55157864", "0.55030775", "0.54902935", "0.5480768", "0.54485005", "0.543827", "0.5429617", "0.54236305", "0.5414311", "0.54104346", "0.5383233", "0.5376107", "0.53731465", "0.53706133", "0.5358294", "0.53495294", "0.53427464", "0.53372294", "0.53269714", "0.5322908", "0.52989346", "0.5297358", "0.52928853", "0.5279363", "0.52688634", "0.5267956", "0.5258334", "0.5245307", "0.5244189", "0.5219912", "0.5215197", "0.52070916", "0.52017933", "0.5189885", "0.51802784", "0.5179413", "0.51786256", "0.5178023", "0.517441", "0.51709306", "0.5169622", "0.5163919", "0.5161569", "0.5152631", "0.51395875", "0.51322645", "0.51291734", "0.51234305", "0.51121086", "0.5107894", "0.51047516", "0.5104272", "0.510322", "0.5102654", "0.5102301", "0.50983846", "0.50882703", "0.507717", "0.50769913", "0.5073296", "0.50671494", "0.5052455", "0.5046229", "0.50407505", "0.5036714", "0.5032131", "0.5026829", "0.50228417", "0.5019257", "0.5019157", "0.5017108", "0.50148547", "0.50077176", "0.500561", "0.50035745", "0.49985963", "0.4993958", "0.49894467" ]
0.7752744
0
Clean the cache before autotraining, which can avoid .
def _clean_cache(self):
        torch = import_optional_dependency("torch")
        if self.device == torch.device('cuda'):
            with torch.cuda.device(self.device):
                torch.cuda.empty_cache()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_cache(self):\n return", "def clear_model_cache():\n global __model_cache\n __model_cache = {}", "def clear_cache():\n # TODO\n pass", "def _clean_cache(self):\n del self._cache\n self._cache = {}", "def decache(self):", "def clear_cache(self):\n pass", "def _purge():\r\n _cache.clear()", "def reset_cache():\n global _CACHE\n _CACHE.clear()", "def _clear_cache(self):\n self.cache = {}", "def _clear_image_cache(self):\n logger.debug(\"Clearing image cache\")\n self._pathoutput = None\n self._previewoutput = None\n self._previewtrain = dict()\n self._previewcache = dict(modified=None, # cache for extract and converts\n placeholder=None)", "def cache_clear(self):\n\t\tself.__cache = {}", "def destroy_cache():\n # TODO\n pass", "def reset_cache(self):\n self.cache = [None] * self.n_layers\n self.offset = 0\n logger.debug('Reset cache.')", "def reset_cache(self):\n self._cache_complete = False\n self._cache = {}\n self._catcache = {}", "def _clear_cache(self):\n\n self._cache = dict()", "def shutdown(self):\n del self.model\n del self.train_dataset\n del self.test_dataset", "def reset_cache(self):\n if self.cache_address is not None:\n for add in self.cache:\n os.remove(add + \".cd\")\n os.remove(add + \".cl\")\n self.cache = [None] * len(self)", "def invalidate_cache(self):\n #self.objects.objects = []\n return True", "def clear_data_cache():\n load_glove.cache_clear()", "def clear_cache():\n cache = Cache()\n cache.reset()", "def reset_cache(self):\n self.izx.reset_cache()\n self.ezx.reset_cache()", "def reset(self):\n # Clear mutable data, but leave the immutables intact\n self.train_data = {}\n self.val_data = {}\n self.test_data = {}\n self.model_files = []\n self.custom_data = {}\n # Remove all the physical assets\n for item in os.scandir(self.root_path):\n os.remove(item.path)\n # Reserialize\n self.serialize()", "def _clear_model_caches(self):\n for comp in getattr(self.model, u'component', []):\n for math in getattr(comp, u'math', []):\n math._unset_cached_links()\n for var in self.model.get_all_variables():\n var.clear_dependency_info()\n assignment_exprs = self.model.search_for_assignments()\n for expr in assignment_exprs:\n expr.clear_dependency_info()", "def clear_inference_tip_cache() -> None:\n _cache.clear()", "def cache_clear(self):\n self.fold_term.cache_clear()", "def delete_anisotropy_cache(self):\n if hasattr(self._model, 'delete_cache'):\n self._model.delete_cache()", "def reset(self):\n\n self.simple_cache = {}\n self.complex_cache = {}\n self.target_cache = {}", "def reset_epoch_cache(self):\n self.epoch_cache = {\"train\":PerformanceBatch(), \n \"val\":PerformanceBatch(), \n \"test\":PerformanceBatch()}", "def clean(self):\n self.iiter = 0\n print(colored('Finished patch %s' % self.image_name, 'yellow'))\n torch.cuda.empty_cache()\n self.loss_min = None\n self.history = u.History(self.args.epochs)", "def reset(self):\n self.pred_classes.clear()\n self.gold_classes.clear()\n self.pred_probas.clear()\n self.gold_probas.clear()\n self.loss = 0\n self.nb_batches = 0\n self.prec_rec_f1 = None\n self.acc = None\n self.mcc = None", "def clear_cache(self):\n self._cache = dict()", "def reset(self):\n # Attempt to reset data loader\n self.data_loader_iter = iter(self.data_loader)\n self.num_batches = 0\n\n # Make sure calibrator will check the cache again when reset.\n self.cache_contents = None", "def clear_cache(self):\n for fle in self.cache_location.glob(\"*.pickle\"):\n fle.unlink()", "def clear(self, cacheDir):", "def clean(self):\n super(NoneCache, 
self).clean()", "def clearCache(cls):\n cls._cameraCache = None", "def clear_cache(self):\n self.part_cache.clear()", "def clear_cache(self):\n\n for dataset in self._datasets:\n dataset.clear_cache()", "def reset_cache():\n setup_cache({})\n yield # test\n setup_cache({})", "def cleanup(self):\n if self.cleanup_allowed:\n shutil.rmtree(self.out_dir)\n self.train_df, self.valid_df, self.test_df = None, None, None", "def _clean_up_experiment(self):\n if self.module_name == \"keras\":\n K.clear_session()", "def clearImageCache(self):\n if os.path.exists(\"./cache/\"):\n shutil.rmtree(\"./cache/\")", "def tearDown(self):\n api.clear_cache()", "def tearDown(self):\n api.clear_cache()", "def delete_cache(self):\n if hasattr(self, '_f_12_interp'):\n del self._f_12_interp\n if hasattr(self, '_f_32_interp'):\n del self._f_32_interp", "def _post_training_cleanup(self):\n tf.reset_default_graph()\n self.sess.close()\n os.chdir(\"../../\")", "def clear_cache():\n path = join(\"data\", \"cache\")\n file_list = os.listdir(path)\n file_list.remove(\".gitkeep\") # Exclude .gitkeep\n for filename in file_list:\n os.remove(join(path, filename))", "def _clear_cache(self):\n keys = [\"nodes\", \"availability\", \"capacity\", \"cost\"]\n for key in keys:\n if key in self.__dict__:\n del self.__dict__[key]", "def reset_train(self):\n\n self.model.apply(self._reset_weights)\n self.epoch_loss.reset()\n self.epoch = 0\n del self.batch_process\n self.batch_process = None", "def purge_cache(self):\n\n self.local_store.purge_cache()", "def test_clear_cache(self):\n api_helpers.clear_cache()", "def clear_cache():\n os.remove(CACHE_FILE)", "def clear_cache(sender, **kwargs):\n# print \"Post save() -> clear cache\"\n cache.clear() # FIXME: This cleaned the complete cache for every site!", "def clear_scache(cls) -> None:\n cls.scache = {}", "def flushCaches(self):\n self.rehabTreeCache = {} \n self.frailRehabTreeCache = {} \n self.frailTreeCache = {}", "def reset(self):\n logging.info(\"Resetting DINTModel.\")\n if self.classifier:\n self.server.remove_model(self.classifier)\n # for ds in self.server.datasets:\n # self.server.remove_dataset(ds)\n # TODO: remove datasets?\n self.classifier = None", "def _reset():\n global g_list_of_classifier\n global g_state\n\n g_state = False\n g_list_of_classifier = disco_classifiers([])", "def reset(self):\n checkpoint = torch.load(\n 'model_lr_finder.pth.tar',\n map_location=self.device)\n self.model.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n self.model.to(self.device)\n self.model.train()", "def clear_cache(self):\n self.mongo_database.cache.delete_many({})", "def invalidateCaches(self):\n\n self._vertexCacheValid = False\n self._genusCacheValid = False\n self._vertexCharacteristicCacheValid = False\n self._coreCacheValid = False", "def shutdown_training(self):\n\n self._train_data_set = None\n self._test_data_set = None", "def clear_cache():\n run(\"rm -rf ~/public_html/var/cache/mage*\")\n run(\"redis-cli FLUSHALL\")", "def clear(self):\n self._cache = dict()", "def _reset(self):\n self.use_gpu = torch.cuda.is_available()\n if self.use_gpu:\n self.model = self.model.cuda()\n self.hist_train_psnr = []\n self.hist_val_psnr = []\n self.hist_loss = []", "def flush():\n for k in cache._thecache.keys():\n del cache._thecache[k]", "def clear_cache(self):\n self._cache = {}\n DrugBank._cache_record = {}\n DrugBank._top_root = None", "def set_emptying_cache():\r\n from pylons import g\r\n from r2.lib.cache import 
SelfEmptyingCache\r\n g.cache.caches = [SelfEmptyingCache(),] + list(g.cache.caches[1:])", "def cache_clean():\n run(cmd=\"rm -rf ~/Library/Developer/Xcode/DerivedData/*\")", "def reset(self):\n self.epochs = 0\n # Shuffle the training data\n perm = np.arange(self.num_train)\n np.random.shuffle(perm)\n assert self.num_train == self.train_images.shape[\n 0], 'Error incorrect shuffling mask'\n self.train_images = self.train_images[perm]\n self.train_labels = self.train_labels[perm]\n self.curr_train_index = 0", "def _untrain(self):\n if self.__clf:\n self.__clf._untrain()", "def clear_cache():\n global custom_memory, custom_hit, custom_miss\n custom_memory = {}\n custom_hit = 0\n custom_miss = 0\n return", "def clear_element_cache():\n local_stiffness.cache_clear()\n transformation_matrix.cache_clear()", "def setUp(self) -> None:\n cache.delete('test_blockbuster.movie_repository.find_all')", "def _clean_up(self):", "def tearDown(self):\n clear_url_caches()", "def tearDown(self):\n clear_url_caches()", "def clean(self):\n self.sess.run(self.init_op)\n print(\"Clean the running state of graph!\")", "def clear(self):\n try:\n shutil.rmtree(self._cache_path)\n self._init_cache_path()\n except Exception:\n return", "def clear_all() -> None:\n datastore.db.client.drop_database(DATABASE_NAME)\n ClassifierCache.clear_all()", "def empty_cache():\n\n if os.path.exists(cache_dir):\n shutil.rmtree(cache_dir)\n os.makedirs(cache_dir)", "def clean_cache_step(self):\n logger.info('Step {}, cleaning cache'.format(self.name))\n self.output = None\n return self", "def reset():\n teardown_db()\n build()", "def tearDownClass(cls):\n os.removedirs(cls.test_dir)\n del cls.checkpoint\n del cls.dataset\n del cls.experiment\n del cls.test_dir\n del cls.tokenizer_parameters\n gc.collect()", "def remove_cache(self) -> None:\n self.indexes = None", "def _reset(self):\n self._model._reset()\n super(RDPAnalyzer, self)._reset()", "def _reset_cache(self, key=None):\n if getattr(self, '_cache', None) is None:\n return\n if key is None:\n self._cache.clear()\n else:\n self._cache.pop(key, None)", "def reset(self):\n self.best_model = None\n self.best_res = -1", "def reset(self):\n self.pred = None\n self.target = None", "def reset(self):\n self.pred = None\n self.target = None", "def reset_training_data(self):\n logger.info(\"resetting training data\")\n if self.shuffle:\n random.shuffle(self.tweets)\n self.batch_generator = self.get_batch()", "def clean(self):\n self.decay()\n self.hit_count = 0.0\n self.out_layer.cost = 0.0\n self.batch_loss = 0.0", "def clear_cache():\n if os.path.exists(get_cachedir()):\n for filename in os.listdir(get_cachedir()):\n if not filename.endswith('.cache'):\n continue\n\n path = os.path.join(get_cachedir(), filename)\n os.unlink(path)", "def clear_brain():\n\n if os.path.exists(os.path.abspath(\"papaya_data\")):\n shutil.rmtree(os.path.abspath(\"papaya_data\"))", "def clear_cache(self):\n ida_strlist.clear_strlist()", "def _untrain(self):\n if not self.trained:\n return\n for clf in self.clfs:\n clf.untrain()\n super(BoostedClassifier, self)._untrain()", "def _reset(self) -> None:\n self.images = []\n self.activations = []\n self.labels = []\n self.preds = []\n self.n_found = 0", "def reset(self):\n self.reset_cache_dir()\n self.reset_download_dir()", "def reset(self):\n self.dynamic_predictions = {}\n self.position = 0\n self.references = []", "def reset():\n\n global optimizer_data\n global optimizer_len\n\n optimizer_data = []\n optimizer_len = 0\n return", "def clearmemo(self):\n # see 
utils.memozie_method\n if hasattr(self, '_cache'):\n self._cache.clear()" ]
[ "0.73841304", "0.7362195", "0.72837085", "0.725626", "0.72057503", "0.7171156", "0.7111482", "0.7094823", "0.7086294", "0.7061974", "0.69822985", "0.69769466", "0.6956917", "0.6934986", "0.6898216", "0.68953973", "0.68409723", "0.6832112", "0.6817994", "0.6815026", "0.6789414", "0.67707884", "0.676565", "0.6762848", "0.6747734", "0.6742023", "0.6703319", "0.6697102", "0.66859347", "0.6681976", "0.66367483", "0.66356957", "0.66267693", "0.66229415", "0.66127026", "0.6599028", "0.65856075", "0.6565364", "0.65604985", "0.65441966", "0.6528199", "0.6528047", "0.6523411", "0.6523411", "0.64911216", "0.64880955", "0.64607334", "0.6460591", "0.6441028", "0.6407237", "0.6405862", "0.64045763", "0.6400668", "0.6393764", "0.63930595", "0.6379021", "0.63709986", "0.63690364", "0.63632125", "0.6354133", "0.6344357", "0.6322482", "0.6297322", "0.6296398", "0.6277039", "0.62610817", "0.6259049", "0.6253287", "0.6242555", "0.62417513", "0.6220365", "0.6214302", "0.6201896", "0.6201598", "0.619393", "0.619393", "0.61915416", "0.6190764", "0.61859566", "0.61782056", "0.6167683", "0.6164147", "0.6158059", "0.6156781", "0.61549646", "0.61498284", "0.6145744", "0.61457205", "0.61457205", "0.61435527", "0.6137494", "0.61301976", "0.61286914", "0.61187", "0.61069953", "0.60953903", "0.6088635", "0.6081525", "0.6074146", "0.60679543" ]
0.71076494
7
Raise a ValueError when the format is unsupported.
def _raise_format_error(self, name: str, format_str: str, source_format: str):
        raise ValueError(f"The '{ name }' should be { format_str }, rather than { source_format }")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_format(self):\n raise NotImplementedError()", "def test_old_data_format_error(self):\n assert_raises(ValueError, get_data, self.testv1)", "def _unknown_format(self, format):\n\n raise errors.NotAcceptable('unknown data format: ' + format)", "def test_decode_raises_when_format_unknown(thing):\n with pytest.raises(ValueError):\n decode(thing)", "def test_readbadformat(self):\n\n self.assertRaises(ParseError, self.hw, self.badfile)", "def test_schema_invalid_format(self):\n bad_schema = [int, int, float, float, str]\n with self.assertRaisesRegexp(Exception, \"more than one char\"):\n self.context.frame.import_csv(self.dataset, bad_schema)", "def check_dataset_format(ds_format):\n if ds_format.lower() not in DATASET_FORMATS.keys():\n raise ValueError(\"dataset_format is expected to be one of %s. '%s' is not valid\" % (\n ', '.join(DATASET_FORMATS.keys()), ds_format,))", "def test_invalid_from_input_format(self):\n pandoc_default_files = [\n os.path.join(\n TEST_DEFAULT_FILES_PATH, \"invalid_from_input_format.yaml\"\n )\n ]\n\n settings = get_settings(PANDOC_DEFAULT_FILES=pandoc_default_files)\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n\n with self.assertRaises(ValueError) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\"Input type has to be a markdown variant.\", message)", "def test_parseTimeInvalidFormat(self):\n self.assertRaises(ValueError, imap4.parseTime, u\"invalid\")", "def test_schema_invalid_type(self):\n bad_schema = -77\n with self.assertRaisesRegexp(Exception, \"more than one char\"):\n self.context.frame.import_csv(self.dataset, bad_schema)", "def test_invalid_format(api):\n\twith pytest.raises(top_stories.InvalidFormatType):\n\t\tapi.get_stories(\"home\", \"xml\")", "def test_invalid_reader_input_format(self):\n pandoc_default_files = [\n os.path.join(\n TEST_DEFAULT_FILES_PATH, \"invalid_reader_input_format.yaml\"\n )\n ]\n\n settings = get_settings(PANDOC_DEFAULT_FILES=pandoc_default_files)\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n\n with self.assertRaises(ValueError) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\"Input type has to be a markdown variant.\", message)", "def handle_invalid_arguments(e):\n errors = e.message\n return generic_errors(errors, code=400)", "def test_incorrect_formats(self, text):\n with pytest.raises(ValueError):\n parse_changelog(text)", "def validateWorkFormat(format):\n\n if not(format):\n return \"You must select a work format.\"", "def unsuported_format(self, msg):\n raise UnsupportedError(self.file.name+\" linker map format not supported by parser:\\n \"+ msg)", "def validate(self):\n if self.params.get(\"format\"):\n if self.params[\"format\"] not in formats:\n raise ValueError(f\"format must be one of {formats}: {self.dt}\")\n for p in self.required:\n if not self.params.get(p):\n raise ValueError(f\"{p} missing: {self.dt}\")", "def _validate_format(self, full_encrypted_value, **options):\n\n if not self.FORMAT_REGEX.match(full_encrypted_value):\n raise InvalidEncryptedValueError('Input value is not a valid '\n '[{current}] encryption value.'\n .format(current=self._get_algorithm()))", "def test_feed_value_throws_on_invalid_data(self):\n self.assertRaises(\n ValueError, self.factory.make_from_feed_value, \"foo\", 1\n )", "def 
test_parse_date_exceptions(\n test_input: typing.Any,\n expected: Exception,\n):\n with pytest.raises(expected):\n tvmaze.parsers.parse_date(test_input)", "def test_validate_with_invalid_key_format_type(self):\n key_format_type = \"invalid\"\n kwargs = {'key_format_type': key_format_type}\n\n self.assertRaisesRegex(\n TypeError, \"invalid key format type\", Digest, **kwargs)", "def testDataFormatNotSupported(self):\n\n x = tf.constant(0.0, shape=(2, 8, 6))\n data_format = \"WNC\"\n self.assertNotIn(data_format, conv.SUPPORTED_1D_DATA_FORMATS)\n\n with self.assertRaisesRegexp(ValueError, \"Invalid data_format\"):\n snt.Conv1D(output_channels=4, kernel_shape=4, data_format=data_format)(x)", "def test_uss_num_bad_values(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_uss_num(val))", "def test_invalid_date_format(self):\n date_field = 'expiry_date'\n self.batch_data['expiry_date'] = date_field\n resp = self.query_with_token(\n self.access_token, batch_info_query.format(**self.batch_data))\n self.assertIn(\n 'invalid literal',\n resp['errors'][0]['message'])", "def test_parser_raises_decode_error(self):\n with self.assertRaises(ParseError):\n self.parser.parse(\n stream=BytesIO(b'{\"value\": NaN}'),\n media_type=\"application/json\",\n parser_context={},\n )", "def _raise_value_error(is_gt, tracker, seq):\n if is_gt:\n raise TrackEvalException(\n 'GT data for sequence %s cannot be converted to the right format. Is data corrupted?' % seq)\n else:\n raise TrackEvalException(\n 'Tracking data from tracker %s, sequence %s cannot be converted to the right format. '\n 'Is data corrupted?' % (tracker, seq))", "def _validate_data_format(data_format):\n data_format_ = str(data_format).upper()\n if data_format_ in {'NHWC', 'NCHW'}:\n return data_format_\n raise ValueError(\n 'Argument data_format=\"{}\" not recognized; must be one of '\n '{{\"NHWC\", \"NCHW\"}} (case insensitive).'.format(data_format))", "def _raise_argument_validation_exception(typedef, value, detail, expected_tokens=None):\n typedef_name = typedef.get('help-name')\n if typedef_name is None:\n typedef_name = typedef.get('name')\n if typedef_name is None:\n typedef_name = typedef.get('field')\n if typedef_name is None:\n typedef_name = '<unknown-type>'\n if detail is None:\n detail = ''\n validation_error_format = typedef.get('validation-error-format',\n 'Invalid %(typedef)s: %(value)s; %(detail)s')\n validation_error = (validation_error_format %\n {'typedef': typedef_name, 'value': str(value), 'detail': detail})\n raise error.ArgumentValidationError(validation_error, expected_tokens)", "def _FormatException(exc):\n return ''.join(traceback.format_exception_only(type(exc), exc))", "def test_single_specifier_missing(self):\n template = 'missing'\n value_count = 1\n msg = 'The formatter should contain one \"{}\" specifier.'\n with six.assertRaisesRegex(self, ValidationError, msg):\n validate_str_substitution(template, value_count)", "def testDataFormatNotSupported(self):\n\n x = tf.constant(0.0, shape=(2, 8, 8, 6))\n data_format = \"NWCH\"\n self.assertNotIn(data_format, conv.SUPPORTED_2D_DATA_FORMATS)\n\n with self.assertRaisesRegexp(ValueError, \"Invalid data_format\"):\n snt.Conv2D(output_channels=4, kernel_shape=(4, 4),\n data_format=data_format)(x)", "def test_wrong_type_error(self, parse_input_mocked_metadata):\n with pytest.raises(ValueError, match=\"invalid value\"):\n bb = parse_input_mocked_metadata(\n \"for int m in [1, 4.2, 9]\\n\\tMZgate(0, 1) | [0, 1]\"\n 
)", "def unexpected_error(self, exception):", "def testConstructorValueError(self):\n test_cases = [\n 'these',\n 'are',\n 'bad',\n 'data',\n 'types',\n 'FILE',\n 'STRING',\n 'JSON',\n ]\n for bad_data_type in test_cases:\n with self.assertRaises(ValueError):\n ASCIITransportFormat(bad_data_type, '')", "def _handle_bad_input_date(f):\n def date_handler_wrapper(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception as e:\n import re\n e_str = \"{}\".format(e)\n for r in [\".*date/time field value out of range: \\\"(.*)\\\".*LINE\",\n \".*invalid input syntax for type timestamp: \\\"(.*)\\\".*\",\n \".*timestamp out of range: \\\"(.*)\\\".*\"]:\n p = re.compile(r, re.DOTALL)\n m = p.match(e_str)\n if m and len(m.groups()) > 0:\n bad_date = m.group(1)\n raise wsme.exc.ClientSideError(_(\n \"Invalid date '{}' specified\".format(bad_date)))\n raise\n return date_handler_wrapper", "def parse_error(self, message, exc_cls=VisualizerParseError):\n raise exc_cls(\"Error parsing %s '%s' (%s:%i): %s\" % \n (self.tag, self.ref, self.filename, self.lineno, message))", "def test_invalid_to_output_format(self):\n pandoc_default_files = [\n os.path.join(\n TEST_DEFAULT_FILES_PATH, \"invalid_to_output_format.yaml\"\n )\n ]\n\n settings = get_settings(PANDOC_DEFAULT_FILES=pandoc_default_files)\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n\n with self.assertRaises(ValueError) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\n \"Output format type must be either html or html5.\", message\n )", "def test_export_evokeds_unsupported_format(fmt, ext):\n evoked = read_evokeds(fname_evoked)\n errstr = fmt.lower() if fmt != \"auto\" else \"vhdr\"\n with pytest.raises(ValueError, match=f\"Format '{errstr}' is not .*\"):\n export_evokeds(f\"output.{ext}\", evoked, fmt=fmt)", "def test_convert_date_error(self):\n try:\n convert_to_date('N/A', FORMAT_CALENDAR)\n except ValueError as error:\n self.assertEqual(type(error), ValueError)", "def test_parse_duration_exceptions(\n test_input: typing.Any,\n expected: Exception,\n):\n with pytest.raises(expected):\n tvmaze.parsers.parse_duration(test_input)", "def __init__(self,value,message):\n ValueError.__init__(self,value,message)", "def test_parse_invalid_version(self):\n version = VersionNumberScaleMeasurement.parse_version(\"This is not a version number\")\n self.assertEqual(Version(\"0\"), version)", "def error(self, message, token=None):\n raise ParseException(\n message,\n self.filename,\n line=self._line,\n line_number=self._line_number,\n token=token)", "def testDataFormatNotSupported(self):\n\n x = tf.constant(0.0, shape=(2, 8, 6))\n data_format = \"WNC\"\n self.assertNotIn(data_format, conv.SUPPORTED_1D_DATA_FORMATS)\n\n with self.assertRaisesRegexp(ValueError, \"Invalid data_format\"):\n snt.CausalConv1D(output_channels=4, kernel_shape=4,\n data_format=data_format)(x)", "def test_cast_y_axis_extrema_invalid_input(self):\r\n self.assertRaises(ValueError, _cast_y_axis_extrema, 'foo')", "def _check_tt_data_format(ttdata: dict, name: str) -> None:\n formatVersion = ttdata.get(\"formatVersion\", None)\n if not isinstance(formatVersion, str):\n raise TypeError(\n f\"Illegal type '{type(formatVersion).__name__}' instead of 'str' for \"\n f\"formatVersion for instructions in {name}.\"\n )\n if formatVersion != \"1\":\n raise NotImplementedError(\n f\"Unknown formatVersion {formatVersion} for instructions in {name}.\"\n 
)", "def test_invalid_date_format_add_warning(self):\n req = MockRequest(self.env, args={\n 'from': '2011-02-02T11:38:50 01:00',\n })\n\n TimelineModule(self.env).process_request(req)\n\n self.assertIn(u'\"2011-02-02T11:38:50 01:00\" is an invalid date, '\n u'or the date format is not known. Try \"%s\" or \"%s\" '\n u'instead.' % (get_date_format_hint(locale_en),\n get_date_format_hint('iso8601')),\n req.chrome['warnings'])", "def test_unsupported_format():\n formatter = TabularOutputFormatter()\n\n with pytest.raises(ValueError):\n formatter.format_name = \"foobar\"\n\n with pytest.raises(ValueError):\n formatter.format_output((), (), format_name=\"foobar\")", "def test_invalid_product_line_raises_value_error(self):\n def parse_invalid_product_line():\n rec = SeqIO.read(path.join('GenBank', 'invalid_product.gb'),\n 'genbank')\n self.assertRaises(ValueError, parse_invalid_product_line)", "def test_validate_date_entry_returns_correct_ValueError(self):\n date_string = \"2018-21-01\"\n date_format = settings.DATE_FORMATS['iso 8601']\n\n error_text = \"{} is not valid in format {}\".format(\n date_string,\n date_format['UI format']\n )\n\n result = self.menu.validate_date_entry(date_string, date_format)\n\n expected_result = (error_text, None)\n\n self.assertEqual(result, expected_result)", "def test_constructor_bad_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: D1Line(self.bad_line))", "def test_handle_raise_validation_error(self) -> None:\n with pytest.raises(ValidationError) as excinfo:\n FileLookup.handle(\"foo:bar\")\n assert excinfo.value.errors() == [\n {\n \"loc\": (\"codec\",),\n \"msg\": f\"Codec 'foo' must be one of: {', '.join(CODECS)}\",\n \"type\": \"value_error\",\n }\n ]", "def test_validate_format(self):\n\n class WidgetConfigurableDataset(ConfigurableDatasetRequest):\n VALID_DATASET_FORMATS = ('json', 'xml')\n\n t = WidgetConfigurableDataset('SomeCoolSet',\n params=dict(subjectid='45838',\n locale='eng',\n app_instance_uuid='1234'))\n\n self.assertEqual(Url('datasets/SomeCoolSet?subjectid=45838&'\n 'locale=eng&'\n 'app_instance_uuid=1234'), Url(t.url_path()))\n\n with self.assertRaises(ValueError) as err:\n t = WidgetConfigurableDataset('SomeCoolSet',\n dataset_format=\"tsv\",\n params=dict(subjectid='45838',\n locale='eng',\n app_instance_uuid='1234'))\n\n self.assertEqual(\"Dataset format tsv is not valid for SomeCoolSet\", str(err.exception))", "def test_convert_incompatible_units(self):\n self.assertRaises(ValueError, convert_units, self.arr, 'm')", "def parse_invalid(self):\n with self.assertRaises(\n (avro.errors.ProtocolParseException, avro.errors.SchemaParseException),\n msg=f\"Invalid protocol should not have parsed: {self.test_proto!s}\",\n ):\n self.test_proto.parse()", "def test_parse_time_exceptions(\n test_input: typing.Any,\n expected: Exception,\n):\n with pytest.raises(expected):\n tvmaze.parsers.parse_time(test_input)", "def validate_date(column_name, value, date_format, column_data_type=\"date\"):\n value = value.replace(\"T\", \" \")\n dtpart = value.split(\" \")\n value = dtpart[0]\n try:\n datetime.strptime(value, date_format)\n return None\n except ValueError:\n return \"{0} : '{1}' is not a valid {2}\".format(column_name, value, column_data_type)", "def test_datetime_invalid_string(self):\n self.assertRaises(RuntimeError, awstats_reader.awstats_datetime, '2009')", "def __init__(self, source, bad):\n super(RequestFormatException, self).__init__()\n self.source = source\n self.bad = bad", "def 
test__specification_type_to_python_type_unsupported_type(self):\n with self.assertRaises(TypeError):\n _specification_type_to_python_type(\"unsupported_type\")", "def error_handler(num, err):\n print(\"Error in input {}\".format(num))\n err = err.decode()\n raise Exception(err)", "def Invalid(\r\n self, s: str = \"\", e: Type[BaseException] = None, fail: bool = False\r\n ) -> None:\r\n ...", "def testUnrecognizedFieldWrongFormat(self):\n\n class SimpleMessage(messages.Message):\n value = messages.IntegerField(1)\n\n message = SimpleMessage(value=3)\n message.set_unrecognized_field('from_json', 'test', messages.Variant.STRING)\n\n encoded = protobuf.encode_message(message)\n expected = (\n chr((1 << protobuf._WIRE_TYPE_BITS) | protobuf._Encoder.NUMERIC) +\n chr(3))\n self.assertEquals(encoded, expected)", "def test__validate_supported_codec_raise_value_error(self) -> None:\n with pytest.raises(ValidationError) as excinfo:\n ArgsDataModel(codec=\"foo\")\n assert excinfo.value.errors() == [\n {\n \"loc\": (\"codec\",),\n \"msg\": f\"Codec 'foo' must be one of: {', '.join(CODECS)}\",\n \"type\": \"value_error\",\n }\n ]", "def test_gender_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_gender(val))", "def test_to_scaler_non_allowed_value_error(self):\n\n with pytest.raises(\n ValueError,\n match=r\"\"\"scaler should be one of; \\['min_max', 'max_abs', 'standard'\\]\"\"\",\n ):\n\n ScalingTransformer(columns=\"b\", scaler=\"zzz\", scaler_kwargs={\"a\": 1})", "def check_validity_input_formats(input_formats):\n from invenio.search_engine import get_available_output_formats\n valid_formats = get_available_output_formats()\n\n # let's to extract the values of the available formats\n format_values = []\n for aformat in valid_formats:\n format_values.append(aformat['value'])\n\n invalid_format = ''\n for aformat in input_formats:\n if aformat.lower() not in format_values:\n invalid_format = aformat.lower()\n break\n return invalid_format", "def test_buoy_format1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_1)\n assert str(err_info.value) == 'Input formatted incorrectly, see instructions'", "def testDataFormatNotSupported(self):\n\n x = tf.constant(0.0, shape=(2, 7, 8, 9, 6))\n data_format = \"NCHWD\"\n self.assertNotIn(data_format, conv.SUPPORTED_3D_DATA_FORMATS)\n\n with self.assertRaisesRegexp(ValueError, \"Invalid data_format\"):\n snt.Conv3D(output_channels=4, kernel_shape=4, data_format=data_format)(x)", "def test_check_wrong_argument_type(self, number, base):\n with self.assertRaises(exceptions.WrongArgumentTypeError):\n positional.decode(number, base)", "def _warn_if_invalid(nb, version):\n from nbformat import ValidationError, validate\n\n try:\n validate(nb, version=version)\n except ValidationError as e:\n get_logger().error(\"Notebook JSON is not valid v%i: %s\", version, e)", "def test_invalid_writer_output_format(self):\n pandoc_default_files = [\n os.path.join(\n TEST_DEFAULT_FILES_PATH, \"invalid_writer_output_format.yaml\"\n )\n ]\n\n settings = get_settings(PANDOC_DEFAULT_FILES=pandoc_default_files)\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n\n with self.assertRaises(ValueError) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\n \"Output format type must be either html or html5.\", message\n )", "def 
test_invalid_value(self):\n with self.assertRaises(TypeError):\n METRIC_SYSTEM.length('25a', LENGTH_KILOMETERS)\n with self.assertRaises(TypeError):\n METRIC_SYSTEM.temperature('50K', TEMP_CELSIUS)", "def _validate_format(format_type):\n if format_type not in GeopandasWriter.formats:\n raise ValueError('Unsupported file format.')\n\n return True", "def test_convert_invalid_unit():\n with pytest.raises(ValueError):\n pressure_util.convert(5, INVALID_SYMBOL, VALID_SYMBOL)\n\n with pytest.raises(ValueError):\n pressure_util.convert(5, VALID_SYMBOL, INVALID_SYMBOL)", "def error(self, message):\n raise ArgumentParseError(message)", "def test_constructor_bad_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: C2Line(self.bad_line))", "def test_validation_get_valid_formats(self):\n self.assertIsInstance(api.validation.fetch_formats(), dict)", "def Invalid(\r\n self, s: str = \"\", e: Type[BaseException] = None, fail: bool = False\r\n) -> None:\r\n ...", "def test_process_optional_header_data_bad_header_length(self):\n with self.assertRaises(ValueError):\n decoder.process_optional_header_data(BytesIO(td.external_timestamp(True)), 3, self.mask)", "def test_age_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_age(val))", "def test_incorrect_format_1(self):\n changelog = changelog_temp.format(\n before_changelog=\"## [Not yet released]\\n\\n### Added\\n\\n- Added a new feature\\n\"\n )\n with pytest.raises(ValueError):\n parse_changelog(changelog)", "def test_150(self):\n self.assertRaises(\n exceptions.DataONEExceptionException, exceptions.deserialize,\n INVALID_ERROR_DOC[0]\n )", "def _validate_extension(self, extension, allowed_extensions):\n if extension not in allowed_extensions:\n raise LiveDocxError(\"That format isn't allowed - please pick one of these: %s\" % (','.join(self.ALLOWED_TEMPLATE_EXT))\n\nclass LiveDocxError(Exception):", "def command_error(fmt, *args, **kwargs):\n raise CommandError(fmt.format(*args, **kwargs))", "def test_friendly_exception_formatting_exc_with_str_overload():\n ex = InsufficientSignatures(1, 3)\n\n formatted_exception = friendlyEx(ex)\n\n assert formatted_exception == '{}'.format(ex.reason)", "def test_date_of_birth_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_date_of_birth(val))", "def test_value_error(self):\n self._error_test(ValueError)", "def test_convert_nonnumeric_value():\n with pytest.raises(TypeError):\n pressure_util.convert(\"a\", PRESSURE_HPA, PRESSURE_INHG)", "def test_from_knx_wrong_parameter2(self):\n with self.assertRaises(ConversionError):\n DPTValue1Ucount().from_knx(\"0x23\")", "def test_decode_errors(self):\n if self._invalid_encoded:\n self.assert_raises((ValueError, jsonschema.exceptions.ValidationError),\n self.import_cls.decode,\n self._invalid_encoded[0], self.typedef)", "def test_handle_raise_value_error(self) -> None:\n with pytest.raises(ValueError) as excinfo:\n FileLookup.handle(\"foo\")\n assert (\n str(excinfo.value) == \"Query 'foo' doesn't match regex: \"\n \"^(?P<codec>[base64|json|json-parameterized|parameterized|\"\n \"parameterized-b64|plain|yaml|yaml-parameterized]:.+$)\"\n )", "def test_day_microseconds_wrong_type(self):\n with self.assertRaises(ValueError):\n utils.day_microseconds('dummy')", "def test_no_input_format(self):\n pandoc_default_files = [\n os.path.join(TEST_DEFAULT_FILES_PATH, \"no_input_format.yaml\")\n ]\n\n settings 
= get_settings(PANDOC_DEFAULT_FILES=pandoc_default_files)\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n\n with self.assertRaises(ValueError) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\"No input format specified.\", message)", "def test_invalid_process(self):\n with self.assertRaises(TypeError):\n self.encoder.process([1, 2, 3, 4])", "def test_error_basis_state_format(self, basis_state, wires):\n\n with pytest.raises(ValueError, match=\"'basis_state' must only contain\"):\n BasisStatePreparation(basis_state, wires)", "def test_with_invalid_input(self):\n for dataset_type in ['ruler', 'pencil', 'cheese']:\n with self.assertRaises(ValueError) as exc:\n check_dataset_type(dataset_type)\n self.assertEqual(\"Dataset type not 'regular' or 'raw' is %s\" % dataset_type,\n str(exc.exception))", "async def test_return_error_if_any(request_format): # type: ignore[no-untyped-def]\n bad_python = \"this_is_bad = 'hihi\"\n\n response: HTTPResponse = await request_format(\n formatter=\"black\",\n code=[bad_python],\n options={\"line_length\": 123, \"string_normalization\": False},\n )\n json_result = _check_http_code_and_schema(\n response=response,\n expected_code=200,\n expected_schema=EXPECTED_FROMAT_SCHEMA,\n )\n assert json_result[\"code\"][0][\"error\"] == \"Cannot parse: 1:13: this_is_bad = 'hihi\"", "def test_mult_specifiers_missing(self):\n template = '{0} too few {1}'\n value_count = 3\n msg = ('The formatter contains too few \"{}\" '\n 'specifiers for the number of source fields.')\n with six.assertRaisesRegex(self, ValidationError, msg):\n validate_str_substitution(template, value_count)", "def test_data_parse_invalid_json(self):\n lines = ['{\"a\": \"val\" \"b\": \"val2\"}']\n self.assertRaises(TypeError, parser._parse_data, lines)" ]
[ "0.7590254", "0.72390485", "0.721036", "0.6805415", "0.6779957", "0.6535371", "0.6451091", "0.6289927", "0.620279", "0.6175287", "0.61463153", "0.6133835", "0.60960364", "0.6054621", "0.6033631", "0.60062885", "0.6004486", "0.5990601", "0.59883577", "0.5986072", "0.5967362", "0.59346", "0.5932319", "0.59104335", "0.59084284", "0.5900029", "0.5897594", "0.58964026", "0.5889684", "0.58832926", "0.58827966", "0.58732593", "0.58635545", "0.5854906", "0.58487076", "0.5840633", "0.58373547", "0.5832657", "0.58111066", "0.57972217", "0.57969564", "0.57643753", "0.5757433", "0.5749168", "0.57432413", "0.57353014", "0.5728409", "0.5721508", "0.570487", "0.57008207", "0.5682637", "0.5671913", "0.56698954", "0.5669639", "0.5657636", "0.56496584", "0.5646691", "0.5631064", "0.5628542", "0.562559", "0.56156945", "0.56130826", "0.560879", "0.5607184", "0.5600433", "0.55962294", "0.5594241", "0.5593141", "0.5585677", "0.5566058", "0.55614424", "0.5561243", "0.55603164", "0.5553009", "0.555292", "0.55524915", "0.555013", "0.55500793", "0.5549697", "0.5547678", "0.5541788", "0.55381745", "0.5532914", "0.5529149", "0.55276746", "0.551696", "0.55143666", "0.550811", "0.5506005", "0.55013096", "0.5494833", "0.54947203", "0.5488246", "0.5478348", "0.5475928", "0.5475109", "0.5470572", "0.5470449", "0.54685235", "0.54620004" ]
0.7319483
1
Initialize the Neural Network training process.
def __init__(self, model=None, train_dataset=None, eval_dataset=None, optimizer=None, criterion=None, cpu: bool = False):
        # import torch for initialization
        torch = import_optional_dependency("torch")

        # ============== basic parameters ============== #
        # the device that used to train models, which can automatically set
        self.device = torch.device("cuda" if torch.cuda.is_available() and not cpu else "cpu")
        # the optimizer of training
        self.optimizer = optimizer
        # the neural network model
        self.model = model.to(self.device) if model else None
        # the criterion of training
        self.criterion = criterion.to(self.device) if criterion else None
        # the dataset for training
        self.train_dataset = train_dataset
        # the dataset for evaluation
        self.eval_dataset = eval_dataset
        # the training process would show information if self.info is True
        self.info = True

        # ============== the parameters of training ============== #
        # the loss average meter for every epoch
        self.epoch_loss = AverageMeter()
        # the counter for training
        self.epoch = 0
        # training process for iteration
        self.batch_process = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_network(self):\n self.sess = tf.InteractiveSession()\n sys.stderr.write(\"------\\n\")\n self.model.create_model()\n self._initialize_trainer()\n self.sess.run(tf.initialize_all_variables())\n self.saver = tf.train.Saver()", "def init():\n global neural_network\n global labels\n\n # load objects required by run() for inferencing\n model_dir = Model.get_model_path(\"mnist-fashion\")\n # neural model\n neural_network = keras.models.load_model(f\"{model_dir}/neural-network.h5\")\n # labels\n with open(f\"{model_dir}/labels.jsonpickle\", \"r\") as labels_file:\n labels = jsonpickle.decode(labels_file.read())", "def initialize_trainer(self):\n self.initialize_matrices()\n self.initialize_model()\n self.initialize_optimizers()\n return self", "def TrainNetwork(self):\n\n self.logger.info('Train Network')\n self.netWork.TrainGenerator()\n\n # # train NetworkLSTM\n self.logger.info('Train NetworkLSTM')\n self.netWork.TrainLSTM()", "def setup(\n self,\n dim_data: int,\n neural_net: ModelBase,\n optimizer: optax.OptState,\n ):\n # neural network\n self.state_neural_net = neural_net.create_train_state(\n self.rng, optimizer, dim_data\n )\n\n # step function\n self.step_fn = self._get_step_fn()", "def setUp(self):\n self.X_train, self.y_train = load_data(\"../data/traindata.mat.tar.gz\")\n self.nn = NN_hwr([len(self.X_train[0]), 50, 10])", "def prepare_learning(self):\n print 'Separating inputs and outputs...'\n self.inputs, self.outputs = extract_samples(self.matches,\n self.input_features,\n self.output_feature)\n\n print 'Normalizing data...'\n self.normalizer, self.inputs = normalize(self.inputs)\n\n print 'Separating train and test sets...'\n self.train_inputs, self.train_outputs, self.test_inputs, self.test_outputs = split_samples(self.inputs, self.outputs)\n\n print 'Building neural network...'\n self.network = buildNetwork(len(self.input_features),\n 2 * len(self.input_features),\n 1,\n outclass=SigmoidLayer,\n bias=True)\n\n print 'Building and filling pybrain train set object...'\n self.train_set = ClassificationDataSet(len(self.input_features))\n\n for i, input_line in enumerate(self.train_inputs):\n self.train_set.addSample(self.train_inputs[i],\n [self.train_outputs[i] - 1])\n\n self.trainer = BackpropTrainer(self.network, dataset=self.train_set,\n momentum=0.5, weightdecay=0.0)\n\n self.train_set.assignClasses()", "def _initialize_trainer(self):\n self.cost = mse(0., 0.)\n for task_id in self.task_ids.keys():\n self.cost += self.model.get_layer(task_id + '-loss')\n\n opt = Optimizer(self.cost)\n self.optimizer = opt.get_adagrad(self.learning_rate)", "def trainNet():", "def initialize_training(self, training_info):\n self.model.reset_weights()\n self.algo.initialize(self.settings, model=self.model, environment=self.environment, device=self.device)", "def setup_training(self):\n print('setup training called')\n self.steps_done = 0\n self.current_episode_num = 1\n self.total_reward = 0\n\n # self.optimizer = optim.RMSprop(policy_net.parameters())\n self.memory = ReplayMemory(300000)\n self.total_reward_history = []\n # self.loss_history = []\n self.positions = []\n self.n_destroyed_crates = 0\n self.is_in_bomb_range = False", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def initialize_network(self):\n if self.trainer is None:\n # -- Initialize from beginning and start training, since no model is provided -- #\n super().initialize_network() 
# --> This updates the corresponding variables automatically since we inherit this class\n \n # -- Create a Multi Head Generic_UNet from the current network using the provided split and first task name -- #\n # -- Do not rely on self.task for initialization, since the user might provide the wrong task (unintended), -- #\n # -- however for self.plans, the user needs to extract the correct plans_file path by himself using always the -- #\n # -- first task from a list of tasks since the network is build using the plans_file and thus the structure might vary -- #\n self.mh_network = MultiHead_Module(Generic_UNet, self.split, self.tasks_list_with_char[0][0], prev_trainer=self.network,\n input_channels=self.num_input_channels, base_num_features=self.base_num_features,\\\n num_classes=self.num_classes, num_pool=len(self.net_num_pool_op_kernel_sizes))\n # -- Add the split to the already_trained_on since it is simplified by now -- #\n self.already_trained_on[str(self.fold)]['used_split'] = self.mh_network.split\n # -- Save the updated dictionary as a json file -- #\n save_json(self.already_trained_on, join(self.trained_on_path, self.extension+'_trained_on.json'))\n return # Done with initialization\n\n # -- Some sanity checks and loads.. -- #\n # -- Check if the trainer contains plans.pkl file which it should have after sucessfull training -- #\n if 'fold_' in self.trainer.output_folder:\n # -- Remove the addition of fold_X from the output_folder, since the plans.pkl is outside of the fold_X directories -- #\n plans_dir = self.trainer.output_folder.replace('fold_', '')[:-1]\n else:\n # -- If no fold_ in output_folder, everything is fine -- #\n plans_dir = self.trainer.output_folder\n \n assert isfile(join(plans_dir, \"plans.pkl\")), \"Folder with saved model weights must contain a plans.pkl file..\"\n\n # -- Check that the trainer type is as expected -- #\n assert isinstance(self.trainer, (nnUNetTrainerV2, nnUNetTrainerMultiHead)), \"The trainer needs to be nnUNetTrainerV2 or nnUNetTrainerMultiHead..\"\n\n # -- If the trainer is already of Multi Head type, there should also be a pkl file with the sets it has already been trained on ! -- #\n if isinstance(self.trainer, nnUNetTrainerMultiHead): # If model was trained using nnUNetTrainerV2, the pickle file won't exist\n self.already_trained_on = load_json(join(self.trained_on_path, self.extension+'_trained_on.json'))\n \n # -- Load the model and parameters -- #\n # -- NOTE: self.trainer is a Multi Head Network, so it has a model, body and heads. -- #\n print(\"Loading trainer and setting the network for training\")\n self.trainer.load_final_checkpoint(train=True) # Load state_dict of the final model\n\n # -- Set mh_network -- #\n # -- Make it to Multi Head network if it is not already -- #\n # -- Use the first task in tasks_joined_name, since this represents the corresponding task name, whereas self.task -- #\n # -- is the task to train on, which is not equal to the one that will be initialized now using a pre-trained network -- #\n # -- (prev_trainer). 
-- #\n if isinstance(self.trainer, nnUNetTrainerV2):\n self.mh_network = MultiHead_Module(Generic_UNet, self.split, self.tasks_list_with_char[0][0], prev_trainer=self.trainer.network,\n input_channels=self.num_input_channels, base_num_features=self.base_num_features,\\\n num_classes=self.num_classes, num_pool=len(self.net_num_pool_op_kernel_sizes))\n else: # Already Multi Head type\n self.mh_network = self.trainer#.mh_network\n # -- Ensure that the split that has been previously used and the current one are equal -- #\n # -- NOTE: Do this after initialization, since the splits might be different before but still lead to the same level after -- #\n # -- simplification. -- #\n prev_split = self.already_trained_on[str(self.fold)]['used_split']\n assert self.mh_network.split == prev_split,\\\n \"To continue training on the fold {} the same split, ie. \\'{}\\' needs to be provided, not \\'{}\\'.\".format(self.fold, self.mh_network.split, prev_split)\n # -- Delete the prev_split --> not necessary anymore -- #\n del prev_split\n \n # -- Set self.network to the model in mh_network --> otherwise the network is not initialized and not in right type -- #\n self.network = self.mh_network.model", "def train_network(self):\n if self.trainData:\n if self.verbose:\n print('Started training...')\n\n for epoch in range(135):\n pass\n # save the model\n else:\n if self.verbose:\n print('No train data available')", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n self.ae_train(self.net0, self.ae0_optimizer, self.train0_loader, self.val_loader, name='Net0')\n self.ae_train(self.net1, self.ae1_optimizer, self.train1_loader, self.val_loader, name='Net1')\n self.ae_train(self.net2, self.ae2_optimizer, self.train2_loader, self.val_loader, name='Net2')\n\n self.classifier_train(self.net0, self.optimizer0, self.train0_loader, self.val_loader, name='Net0')\n self.classifier_train(self.net1, self.optimizer1, self.train1_loader, self.val_loader, name='Net1')\n self.classifier_train(self.net2, self.optimizer2, self.train2_loader, self.val_loader, name='Net2')", "def initialize(self, training=True, force_load_plans=False, num_epochs=500, prev_trainer=None):\n # -- The Trainer embodies the actual model that will be used as foundation to continue training on -- #\n # -- It should be already initialized since the output_folder will be used. If it is None, the model will be initialized and trained. -- #\n # -- Further the trainer needs to be of class nnUNetTrainerV2 or nnUNetTrainerMultiHead for this method, nothing else. -- #\n # -- Set prev_trainer correctly as class instance and not a string -- #\n self.trainer = prev_trainer\n\n # -- Set nr_epochs to provided number -- #\n self.max_num_epochs = num_epochs\n\n # -- Initialize the trained_on_tasks and load trained_on_folds -- #\n trained_on_tasks = list()\n trained_on_folds = self.already_trained_on.get(str(self.fold), list())\n \n # -- Reset the trained_on_tasks if the trained_on_folds exist for the current fold -- #\n if isinstance(trained_on_folds, dict):\n trained_on_tasks = trained_on_folds.get('finished_training_on', list())\n\n # -- The new_trainer indicates if the model is a new multi head model, -- #\n # -- ie. 
if it has been trained on only one task so far (True) or on more than one (False) -- #\n if len(trained_on_tasks) > 1:\n self.new_trainer = False\n else:\n self.new_trainer = True\n \n super().initialize(training, force_load_plans) # --> This updates the corresponding variables automatically since we inherit this class", "def _initial_setup(self, **train_kwargs):\n super(NetworkValidationBase, self)._initial_setup(**train_kwargs)", "def _train(self):\n self.network.train() # note that here we are calling torch.nn.Module train class method\n epochs_since_improvement = 0\n best_params = None\n self.calculate_validation_loss()\n best_validation_loss = self.validation_average_loss\n\n while epochs_since_improvement < 10:\n self.train_epoch()\n self.calculate_validation_loss()\n if self.validation_average_loss < best_validation_loss:\n epochs_since_improvement = 0\n best_validation_loss = self.validation_average_loss\n best_params = self.network.state_dict()\n else:\n epochs_since_improvement += 1\n LOGGER.info(\"Epochs since improvement in validation_loss: {} \\n\".format(epochs_since_improvement))\n if self.maximum_epochs_allowed is not None and self.epochs_trained >= self.maximum_epochs_allowed:\n break\n LOGGER.info(\"Training complete after {} epochs \\n\".format(self.epochs_trained))\n LOGGER.info(\"Best training loss achieved: {} \\n\".format(self.training_average_loss))\n LOGGER.info(\"Best validation loss achieved: {}\".format(self.validation_average_loss))\n self.learned_params = best_params\n self.network.load_state_dict(best_params)", "def init_learner(self,**kwargs):\r\n \r\n if self.learn_type == 'nn':\r\n #initialize neural network\r\n shape = kwargs[\"shape\"]\r\n #initialize input layer\r\n model = Sequential() \r\n #add hidden layers\r\n for i in range(len(shape)):\r\n if i == 0:\r\n nb_input = self.size\r\n else:\r\n nb_input = shape[i -1]\r\n nb_output = shape[i]\r\n model.add(Dense(nb_input,nb_output,init=\"he_normal\",\r\n activation = \"tanh\"))\r\n model.add(Dropout(.5))\r\n model.add(Dense(shape[-1],1,init = \"he_normal\",\r\n activation = \"linear\"))\r\n model.compile(loss = 'mean_squared_error',optimizer = 'rmsprop')\r\n self.learner = model\r\n \r\n elif self.learn_type == 'linear':\r\n #initialize parameter\r\n self.learner = Linear(self.size,**kwargs)", "def __init__(self):\n self.num_examples_per_epoch = 99999\n self.optimizer = \"Adam\"\n # Learning rate for the initial phase of training.\n self.initial_learning_rate = 0.0001\n self.learning_rate_decay_factor = 0.5\n self.num_epochs_per_decay = 8.0\n\n # Learning rate when fine tuning the Inception v3 parameters.\n self.train_inception_learning_rate = 0.0001\n\n # If not None, clip gradients to this value.\n self.clip_gradients = 5.0\n\n # How many model checkpoints to keep.\n self.max_checkpoints_to_keep = 5000", "def initialise_network(self):\n raise NotImplementedError", "def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()", "def __init__(self):\n # Number of examples per epoch of training data.\n self.num_examples_per_epoch = None \n\n # Optimizer for training the model.\n self.optimizer = \"SGD\" #default \"SGD\"\n\n # Learning rate for the initial phase of training.\n self.initial_learning_rate = 2.0 # default 2.0\n self.learning_rate_decay_factor = 0.8\n self.num_epochs_per_decay = 4 #default 8\n\n # If not None, clip gradients to this value.\n self.clip_gradients = 5.0\n\n # How many model checkpoints to keep.\n self.max_checkpoints_to_keep = 2", "def 
initialize_network(self):\n # intermediate layer size\n ils = int((self.specbinnum + self.numfilters) / 2)\n\n network = lasagne.layers.InputLayer((None, 1, self.specbinnum, self.numtimebins), self.input_var)\n\n network = NormalisationLayer(network, self.specbinnum)\n self.normlayer = network\n\n network, _ = custom_convlayer_2(network, in_num_chans=self.specbinnum, out_num_chans=ils)\n network = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=ils, out_num_chans=self.numfilters)\n network = batch_norm(network)\n\n network = lasagne.layers.NonlinearityLayer(network, nonlinearity=elu)\n self.latents = network\n network = ZeroOutBackgroundLatentsLayer(self.latents,\n mp_down_factor=self.mp_down_factor,\n numfilters=self.numfilters,\n numtimebins=self.numtimebins,\n background_latents_factor=self.background_latents_factor,\n use_maxpool=self.use_maxpool)\n network, _ = custom_convlayer_2(network, in_num_chans=self.numfilters, out_num_chans=ils)\n network = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=ils, out_num_chans=self.specbinnum)\n network = batch_norm(network)\n\n # output_size\n num_time_samples = int(audioframe_len/2 * (self.numtimebins + 1))\n # network = batch_norm(DenseLayer(network, num_time_samples)) # MemoryError\n network, _ = custom_convlayer_2(network, in_num_chans=self.specbinnum, out_num_chans=num_time_samples)\n network, _ = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=num_time_samples, out_num_chans=1)\n network, _ = batch_norm(network)\n\n self.network = network", "def __init__(self, encoded_network, input_shape, n_classes, batch_size=256,\n log_path=\"./trainer\", mu=0.5, rho=0.5, variable_scope=\"cnn\"):\n super(EarlyStopNASTrainer, self).__init__(\n encoded_network=encoded_network,\n input_shape=input_shape,\n n_classes=n_classes,\n batch_size=batch_size,\n log_path=log_path,\n variable_scope=variable_scope\n )\n # Custom variables for the refined accuracy in BlockQNN implementation\n # pylint: disable=invalid-name\n self.mu = mu\n self.rho = rho\n\n # Updated during training call\n self.density = None\n self.flops = None\n\n # Build the estimator\n self._set_estimator()", "def __init__(self):\n self.train(positivity_files, 0)\n self.train(subjectivity_files, 1)", "def Train(self):\n self.init_epoch = self.epoch\n if self.epoch >= self.params.num_epoch:\n WARNING('Num_epoch should be smaller than current epoch. Skip training......\\n')\n else:\n for _ in range(self.epoch, self.params.num_epoch):\n self.epoch += 1\n print('-' * 20 + 'Epoch.' 
+ str(self.epoch) + '-' * 20)\n\n # train one epoch\n self.train_one_epoch()\n\n # should display\n if self.epoch % self.params.display == 0:\n print('\\tTrain loss: %.4f' % self.train_loss[-1])\n\n # should save\n if self.params.should_save:\n if self.epoch % self.params.save_every == 0:\n self.save_checkpoint()\n\n # test every params.test_every epoch\n if self.params.should_val:\n if self.epoch % self.params.val_every == 0:\n self.val_one_epoch()\n print('\\tVal loss: %.4f' % self.val_loss[-1])\n\n # adjust learning rate\n self.adjust_lr()\n self.train_one_epoch_Image_display() \n \n # save the last network state\n if self.params.should_save:\n self.save_checkpoint()\n\n # train visualization\n self.plot_curve()", "def train(self):\n # Change directory to the code directory\n current_working_directory = os.getcwd()\n\n os.chdir(self.model_parameters[\"NN_code_directory\"])\n\n self.call_training_routine()\n\n # Come back to the original directory\n os.chdir(current_working_directory)", "def init_train(self):\n data = self.loader.load_labelled_data(self.conf.split, 'training')\n\n # Initialise unlabelled data iterator\n num_ul = 0\n if self.conf.ul_mix > 0:\n ul_data = self.loader.load_unlabelled_data(self.conf.split, 'all')\n\n # calculate number of unlabelled images as a proportion of the labelled images\n num_ul = int(data.size() * self.conf.ul_mix)\n num_ul = num_ul if num_ul <= ul_data.size() else ul_data.size()\n log.info('Sampling %d unlabelled images out of total %d.' % (num_ul, ul_data.size()))\n ul_data.sample(num_ul)\n self.gen_X_U = data_utils.generator(self.conf.batch_size, 'overflow', ul_data.images)\n\n # Initialise labelled data iterator\n assert self.conf.l_mix >= 0\n\n # calculate number of labelled images\n num_l = int(data.size() * self.conf.l_mix)\n num_l = num_l if num_l <= data.size() else data.size()\n log.info('Using %d labelled images out of total %d.' 
% (num_l, data.size()))\n train_images = data.images[:num_l]\n train_masks = data.masks[:num_l]\n\n self.conf.unlabelled_image_num = num_ul\n self.conf.labelled_image_num = num_l\n self.conf.data_len = num_ul if num_ul > num_l else num_l\n self.conf.batches = int(np.ceil(self.conf.data_len / self.conf.batch_size))\n self.conf.save()\n\n self.gen_X_L = data_utils.generator(self.conf.batch_size, 'overflow', train_images, train_masks)\n\n # Initialise real masks iterator for discriminator training, using the real masks from the data CV split.\n self.other_masks = data_utils.generator(self.conf.batch_size, 'overflow', data.masks + 0)", "def construct(self):\n self.input_size = self.numplanes * self.boardsize**2\n \n if self.hidden:\n layers = [\n torch.nn.Linear(self.input_size, self.hidden), \n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden, self.boardsize**2)\n ]\n else:\n layers = [torch.nn.Linear(self.input_size, self.boardsize**2)]\n\n self.layers = torch.nn.ModuleList(layers)\n self.optimizer = torch.optim.Adam(self.parameters(), lr=1e-5)\n logging.info(\"Model initialized: %s\", self)", "def __init__(self, encoded_network, input_shape, n_classes, batch_size=256,\n log_path=\"./trainer\", variable_scope=\"custom\"):\n super(DefaultNASTrainer, self).__init__(\n encoded_network=encoded_network,\n input_shape=input_shape,\n n_classes=n_classes,\n batch_size=batch_size,\n log_path=log_path,\n variable_scope=variable_scope\n )\n self._set_estimator()", "def __init__(self, batch_size=1, epochs=None, learning_rate=None, momentum=None, weights_name=''):\n self.batch_size = batch_size\n self.epochs = epochs\n self.model = None\n self.optimizer = None\n self.cb = None\n self.lr = learning_rate\n self.momentum = momentum\n self.weights_name = weights_name", "def __init__(self):\n \n self.model = Net()\n\n if torch.cuda.is_available():\n map_location=torch.device('cuda')\n else:\n map_location=torch.device('cpu')\n\n # load parameters\n self.model.load_state_dict(torch.load('model.pt',\n map_location=map_location)) \n \n if torch.cuda.is_available():\n self.model.cuda()\n else:\n self.model.cpu()\n \n self.model.eval()", "def start_training(self):\n self.training = True", "def init():\n ########################\n # OPTIONS\n ########################\n # Debugging tools\n global TIMER # displays time of every major step\n TIMER = True\n global MONITOR # displays monitoring infos\n MONITOR = False\n \n global directories\n directories = {'1Face': 'data/1Face/',\n '2Faces': 'data/2Faces/',\n '3Faces': 'data/3Faces/',\n 'test': 'data/test/'}\n \n # Opt. swicthes\n global maxfinder # to find the max dim. 
amongst the pictures\n maxfinder = False\n global ML_mode\n ML_mode = {'CNN_Train': False,\n 'CNN_Pred' : True,\n 'Sampler': True}\n \n # Global variables\n global num_pics\n num_pics = {'1Face': 0,\n '2Faces': 0,\n '3Faces': 0}\n global labels\n labels = {'1Face': 0,\n '2Faces': 1,\n '3Faces': 2}\n global num_data\n num_data = 0\n global splitsize # Fraction of data to build the training set\n splitsize = 0.7 \n global maxheight # Resize the pictures to a power of 2 for CNN (2^8 here)\n maxheight = 128\n global maxwidth\n maxwidth = 128\n global TreshEdge # Number of consecutive black pixels to define an edge\n TreshEdge = 2\n global TreshFace # Number of white pixels to define a face (or large edge)\n TreshFace = maxheight/16", "def init(self, train):\n return", "def init(self, train):\n return", "def init(self, train):\n return", "def init(self, train):\n return", "def init(self, train):\n return", "def __init__(self, **kwargs):\n #super(Net, self).__init__()\n nn.Module.__init__(self)\n # Build CNN\n module, shapes, optim = build_neuron_network(**kwargs)\n self._configuration = kwargs\n self.add_module('cnn', module)\n self.shapes = shapes\n # Loss and optimization\n self.criterion = nn.MSELoss(reduction='mean')\n self.optimizer = optim\n self._kwargs = kwargs", "def setUp(self):\n # The short NSC used in this example\n self.net_nsc = [\n (1, 4, 0, 0, 0), # Layer 1: Identity(input)\n (2, 1, 3, 1, 0), # Layer 2: Convolution(Layer1)\n (3, 1, 3, 2, 0), # Layer 3: Convolution(Layer2)\n (4, 5, 0, 1, 3), # Layer 4: Convolution(Layer1)\n (5, 7, 0, 0, 0), # Layer 5: Convolution(Layer4)\n ]", "def test_n_and_train(self):\r\n\r\n n = NeuronNetwork(1,\r\n [1],\r\n [[[0.0,0.0]]],\r\n [[0.0]])\r\n\r\n inputs = [[0,0], [0,1], [1,0], [1,1]]\r\n targets = [[0], [0], [0], [1]]\r\n\r\n n.train(inputs,targets,1000,180)\r\n\r\n print(n)\r\n self.assertLess(n.feed_forward([0,0]), [0.001])\r\n self.assertGreater(n.feed_forward([1,0]), [0.001])\r\n self.assertGreater(n.feed_forward([0,1]), [0.001])\r\n self.assertGreater(n.feed_forward([1,1]), [0.9])", "def __init__(self):\n #self.NN = Neural_Network()\n y_vals = pd.read_csv('training_data_y.csv')\n x_vals_original = pd.read_csv('training_data_x.csv')\n x_vals_original.columns = ['R1', 'G1', 'B1', 'W1', 'R2', 'G2', 'B2', 'W2', 'R3', 'G3', 'B3', 'W3']\n total_x_train = self.getNewDF_X(x_vals_original)\n total_y_train = self.getNewDF_Y(y_vals) \n #training data is numpy arrays here\n x_arr = np.asarray(total_x_train,dtype=np.float32)\n y_train = np.asarray(total_y_train,dtype=np.float32)\n #convert training data to tensors and scale it\n x_train = torch.tensor((x_arr), dtype=torch.float)\n self.x_train = self.scaleInputTestData(x_train)\n self.y_train = torch.tensor((y_train), dtype=torch.float) / 100", "def init_training(self):\n\n if not os.path.exists(self._model_root_path):\n os.makedirs(self._model_root_path)\n\n # Only initialize once!\n if self._model is None:\n self._model = TrainableAimbotModel(self._config, self._fov,\n os.path.join(self._model_root_path, 'aimbot_model.tf'))\n\n if not os.path.isfile(self._train_data_tfrecord_path) and not os.path.isfile(self._test_data_tfrecord_path):\n # Only create if not existing\n images_labels = _get_annotations_and_images(self._image_path)\n images_labels_train, images_labels_test = train_test_split(images_labels, shuffle=True, test_size=0.20)\n\n self._model.create_tfrecords(self._train_data_tfrecord_path, images_labels_train)\n self._model.create_tfrecords(self._test_data_tfrecord_path, 
images_labels_test)\n\n self._train_data_set = self._model.create_dataset(self._train_data_tfrecord_path, augment=True, shuffle=True)\n self._test_data_set = self._model.create_dataset(self._train_data_tfrecord_path)", "def initialize(self):\n LOG.info(\"Initializing Model.\")\n self.model = self.convert(df=self.training_df)\n if self.bootstraps is not None:\n LOG.info(\"Bootstrapping Data.\")\n self.bootstrap_data()", "def initialize_ai(self):\n\n self.gid, self.genome = constants.genomes_to_run[self.identifier]\n self.genome.fitness = -1\n self.net = neat.nn.FeedForwardNetwork.create(self.genome, constants.conf)\n # self.net = neat.nn.RecurrentNetwork\n # .create(self.genome, constants.conf)", "def setup(self):\n if not os.path.exists(self.save_dir):\n os.mkdir(self.save_dir)\n\n if self.configuration['load_checkpoint'] >= 0:\n last_checkpoint = self.configuration['load_checkpoint']\n else:\n last_checkpoint = -1\n\n if last_checkpoint >= 0:\n # enable restarting training\n self.load_networks(last_checkpoint)\n if self.is_train and self.scheduler != 'plateau':\n self.load_optimizers(last_checkpoint)\n for o in self.optimizers:\n o.param_groups[0]['lr'] = o.param_groups[0]['initial_lr'] # reset learning rate\n\n self.schedulers = [get_scheduler(optimizer, self.configuration) for optimizer in self.optimizers]\n\n if last_checkpoint > 0 and self.scheduler != 'plateau':\n for s in self.schedulers:\n for _ in range(last_checkpoint):\n s.step()\n\n self.print_networks()", "def setUp(self):\n # The short NSC used in this example\n self.net_nsc = [\n (1, 4, 0, 0, 0), # Layer 1: Identity(input)\n (2, 1, 1, 1, 0), # Layer 2: Convolution(Layer1)\n (3, 1, 3, 2, 0), # Layer 3: Convolution(Layer2)\n (4, 1, 1, 1, 0), # Layer 4: Convolution(Layer1)\n (5, 1, 5, 4, 0), # Layer 5: Convolution(Layer4)\n (6, 6, 0, 3, 5), # Layer 6: Concat(Layer3, Layer5)\n (7, 2, 3, 1, 0), # Layer 7: MaxPooling(Layer1)\n (8, 1, 1, 7, 0), # Layer 8: Convolution(Layer7)\n (9, 6, 0, 6, 8), # Layer 9: Concat(Layer6, Layer8)\n (10, 7, 0, 0, 0), # Layer 10: Terminal\n ]\n\n # Load training and eval data\n (train_data, train_labels), (eval_data, eval_labels) = \\\n tf.keras.datasets.mnist.load_data()\n\n # Fix the dataset\n self.train_data = normalize_dataset(dataset=train_data, baseline=255)\n self.train_labels = train_labels.astype(np.int32)\n\n self.eval_data = normalize_dataset(dataset=eval_data, baseline=255)\n self.eval_labels = eval_labels.astype(np.int32)\n\n # The batch size\n self.batch_size = 256\n\n # Workspace directory\n workspace_dir = \"./workspace\"\n self.training_dir = \"{workspace}/trainer_test_earlystop\".format(\n workspace=workspace_dir\n )", "def train(self, training_data, testData, classNum, batchSize):\n # find the numbers for feature and label\n featureNum = training_data.shape[1] - 1\n\n # #this will find all the unique labels automatically, but will have problem when training data is lacking some labels\n # labelNum = len(np.unique(training_data[:, :1]))\n labelNum = classNum\n\n # get the number of nodes for each layer\n if \"hidden_layer\" in self.params and self.params[\"hidden_layer\"] is not None:\n nodeNum = [featureNum] + self.params[\"hidden_layer\"] + [labelNum]\n else:\n nodeNum = [featureNum, featureNum * 2, labelNum]\n\n # get the mode for initializing the weight\n if \"weightInitMode\" in self.params and self.params[\"weightInitMode\"] is not None:\n weightInitMode = self.params[\"weightInitMode\"]\n else:\n weightInitMode = None\n\n # get the momentum factor\n if \"momentumFactor\" in 
self.params:\n momentumFactor = self.params[\"momentumFactor\"]\n else:\n momentumFactor = 0.0\n\n self.clf = NeuralNetwork(training_data, nodeNum, weightInitMode, momentumFactor)\n iteration = 5\n totalIter = 0\n testSize = 100000\n while iteration > 0:\n\n if iteration < 10:\n self.clf.train(iteration, batchSize)\n totalIter += iteration\n print \"---------- Settings ----------\"\n print \"Examples :\", training_data.shape[0]\n print \"Batch size :\", batchSize\n print \"Alpha :\", self.clf.getAlpha()\n print \"Momentum factor :\", momentumFactor\n print \"# of Nodes in all layers :\", nodeNum\n print \"Training iteration so far:\", totalIter\n self.file.write(\"\\n\")\n self.file.write(\"---------- Settings ----------\" + \"\\n\")\n self.file.write(\"Examples : \" + str(training_data.shape[0]) + \"\\n\")\n self.file.write(\"Batch size : \" + str(batchSize) + \"\\n\")\n self.file.write(\"Alpha : \" + str(self.clf.getAlpha()) + \"\\n\")\n self.file.write(\"Momentum factor : \" + str(momentumFactor) + \"\\n\")\n self.file.write(\"# of Nodes in all layers : \" + str(nodeNum) + \"\\n\")\n self.file.write(\"Training iteration so far: \" + str(totalIter) + \"\\n\")\n self.test(training_data, \"training\")\n self.test(testData, \"testing\")\n iteration = 0\n\n while iteration >= testSize:\n self.clf.train(testSize, batchSize)\n totalIter += testSize\n print \"---------- Settings ----------\"\n print \"Examples :\", training_data.shape[0]\n print \"Batch size :\", batchSize\n print \"Alpha :\", self.clf.getAlpha()\n print \"Momentum factor :\", momentumFactor\n print \"# of Nodes in all layers :\", nodeNum\n print \"Training iteration so far:\", totalIter\n self.file.write(\"\\n\")\n self.file.write(\"---------- Settings ----------\" + \"\\n\")\n self.file.write(\"Examples : \" + str(training_data.shape[0]) + \"\\n\")\n self.file.write(\"Batch size : \" + str(batchSize) + \"\\n\")\n self.file.write(\"Alpha : \" + str(self.clf.getAlpha()) + \"\\n\")\n self.file.write(\"Momentum factor : \" + str(momentumFactor) + \"\\n\")\n self.file.write(\"# of Nodes in all layers : \" + str(nodeNum) + \"\\n\")\n self.file.write(\"Training iteration so far: \" + str(totalIter) + \"\\n\")\n self.test(training_data, \"training\")\n self.test(testData, \"testing\")\n iteration -= testSize\n\n if iteration > 0:\n self.clf.train(iteration, batchSize)\n totalIter += iteration\n print \"---------- Settings ----------\"\n print \"Examples :\", training_data.shape[0]\n print \"Batch size :\", batchSize\n print \"Alpha :\", self.clf.getAlpha()\n print \"Momentum factor :\", momentumFactor\n print \"# of Nodes in all layers :\", nodeNum\n print \"Training iteration so far:\", totalIter\n self.file.write(\"\\n\")\n self.file.write(\"---------- Settings ----------\" + \"\\n\")\n self.file.write(\"Examples : \" + str(training_data.shape[0]) + \"\\n\")\n self.file.write(\"Batch size : \" + str(batchSize) + \"\\n\")\n self.file.write(\"Alpha : \" + str(self.clf.getAlpha()) + \"\\n\")\n self.file.write(\"Momentum factor : \" + str(momentumFactor) + \"\\n\")\n self.file.write(\"# of Nodes in all layers : \" + str(nodeNum) + \"\\n\")\n self.file.write(\"Training iteration so far: \" + str(totalIter) + \"\\n\")\n self.test(training_data, \"training\")\n self.test(testData, \"testing\")\n iteration = 0\n\n print \"\"\n restart = raw_input(\"Do you want to restart? 
(Y/N)\")\n if restart.upper() == \"Y\":\n totalIter = 0\n print \"Current Alpha is\", self.clf.getAlpha()\n alpha = raw_input(\"What alpha ?\")\n self.clf.setAlpha(float(alpha))\n self.clf.initTheta()\n self.file.write(\"\\n\")\n self.file.write(\"*****************************************************\\n\")\n self.file.write(\"Re-initialize trail with alpha = \" + str(alpha) + \"\\n\")\n self.file.write(\"*****************************************************\\n\")\n\n print \"\"\n iteration = raw_input(\"How many iteration do you want to train the model?\")\n try:\n iteration = int(iteration)\n except:\n iteration = raw_input(\"Please input an integer\")\n iteration = 1\n print \"Total training iterations:\", totalIter", "def setUp(self):\n # The short NSC used in this example\n self.net_nsc = [\n (1, 4, 0, 0, 0), # Layer 1: Identity(input)\n (2, 1, 1, 1, 0), # Layer 2: Convolution(Layer1)\n (3, 1, 3, 2, 0), # Layer 3: Convolution(Layer2)\n (4, 1, 1, 1, 0), # Layer 4: Convolution(Layer1)\n (5, 1, 5, 4, 0), # Layer 5: Convolution(Layer4)\n (6, 6, 0, 3, 5), # Layer 6: Concat(Layer3, Layer5)\n (7, 2, 3, 1, 0), # Layer 7: MaxPooling(Layer1)\n (8, 1, 1, 7, 0), # Layer 8: Convolution(Layer7)\n (9, 6, 0, 6, 8), # Layer 9: Concat(Layer6, Layer8)\n (10, 7, 0, 0, 0), # Layer 10: Terminal\n ]\n\n # Load training and eval data\n (train_data, train_labels), (eval_data, eval_labels) = \\\n tf.keras.datasets.mnist.load_data()\n\n # Fix the dataset\n self.train_data = normalize_dataset(dataset=train_data, baseline=255)\n self.train_labels = train_labels.astype(np.int32)\n\n self.eval_data = normalize_dataset(dataset=eval_data, baseline=255)\n self.eval_labels = eval_labels.astype(np.int32)\n\n # The batch size\n self.batch_size = 256\n\n # Workspace directory\n workspace_dir = \"./workspace\"\n self.training_dir = \"{workspace}/trainer_test\".format(\n workspace=workspace_dir\n )", "def setup_training(self):\n self.transitions = deque(maxlen=TRANSITION_HISTORY_SIZE)\n self.total_rewards = []\n self.rewards = []\n self.steps = []\n self.average_rewards = []\n self.average_steps = []\n self.model = initialize_model()\n self.invalid_actions = 0\n self.average_invalid_actions = []\n self.total_invalid_actions = []", "def init_model(self, num_inputs, num_outputs):\n self.number_inputs = num_inputs\n\n self.number_outputs = num_outputs\n\n self.last_state = None\n\n self.model.start_nn(num_inputs, num_outputs)\n \n self.is_model_init = True", "def train(self):\n for name in self.network_names:\n if isinstance(name, str):\n net = getattr(self, 'net')\n net.train()", "def initialize(self, finetune_lr=0.1):\n\n inputs = Tensor.matrix('inputs')\n outputs = Tensor.ivector('outputs')\n minibatch_index = Tensor.lscalar('minibatch_index')\n\n self.training_function = self.compiled_training_function(\n self.classifier,\n minibatch_index,\n inputs,\n outputs,\n finetune_lr\n )\n self.validation_eval_function = self.compiled_validation_function(\n self.classifier,\n minibatch_index,\n inputs,\n outputs\n )\n self.test_eval_function = self.compiled_test_function(\n self.classifier,\n minibatch_index,\n inputs,\n outputs\n )", "def initialize(self):\n \n #initialize the variables\n init = tf.global_variables_initializer()\n self.session.run(init)\n \n #initialize the data iterators\n self.session.run(self.data_iterator.initializer)", "def train_start(self):\n self.module.img_enc.train()\n self.module.txt_enc.train()", "def train(self, iterations=1):\n for _ in range(iterations):\n self.trainer.train()\n self.test_network()", 
"def __init__(self, options, path):\n print('Prepare the network and data.')\n self._options = options\n self._path = path\n self._epoch = 0\n\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n # Network.\n network = SCNN()\n #weight_init(network)\n network = network.to(self.device)\n # self._net = network.cuda()\n self._net = network\n #self._net = torch.nn.DataParallel(network)\n\n logspaced_LR = np.logspace(-1, -4, self._options['epochs'])\n # Load the model from disk.\n checkpoints_list = os.listdir(self._path['model'])\n if len(checkpoints_list) != 0:\n self._net.load_state_dict(torch.load(\n os.path.join(self._path['model'], '%s%s%s' % ('net_params', str(len(checkpoints_list) - 1), '.pkl'))))\n self._epoch = len(checkpoints_list)\n self._options['base_lr'] = logspaced_LR[len(checkpoints_list)]\n # self._net.load_state_dict(torch.load(self._path['model']))\n print(self._net)\n # Criterion.\n self._criterion = torch.nn.CrossEntropyLoss().cuda()\n # Solver.\n self._solver = torch.optim.SGD(\n self._net.parameters(), lr=self._options['base_lr'],\n momentum=0.9, weight_decay=self._options['weight_decay'])\n # self._solver = torch.optim.Adam(\n # self._net.parameters(), lr=self._options['base_lr'],\n # weight_decay=self._options['weight_decay'])\n lambda1 = lambda epoch: logspaced_LR[epoch]\n self._scheduler = torch.optim.lr_scheduler.LambdaLR(self._solver, lr_lambda=lambda1)\n\n self.train_transforms = torchvision.transforms.Compose([\n torchvision.transforms.Resize(size=256), # Let smaller edge match\n torchvision.transforms.RandomHorizontalFlip(),\n torchvision.transforms.RandomCrop(size=224),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225))\n ])\n self.test_transforms = torchvision.transforms.Compose([\n torchvision.transforms.Resize(size=256),\n torchvision.transforms.CenterCrop(size=224),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225))\n ])\n self.train_data = ImageDataset(csv_file=os.path.join(path['kadis'], 'train.txt'),\n img_dir=os.path.join(path['kadis'], 'dist_imgs'),\n transform=self.train_transforms,\n test=False)\n self._train_loader = torch.utils.data.DataLoader(\n self.train_data, batch_size=self._options['batch_size'],\n shuffle=True, num_workers=1, pin_memory=True)\n self.test_data = ImageDataset(csv_file=os.path.join(path['kadis'], 'test.txt'),\n img_dir=os.path.join(path['kadis'], 'dist_imgs'),\n transform=self.test_transforms,\n test=True)\n self._test_loader = torch.utils.data.DataLoader(\n self.test_data, batch_size=self._options['batch_size'],\n shuffle=False, num_workers=1, pin_memory=True)", "def initialize_if_not(self, training=False):\r\n if self._initialized:\r\n return\r\n\r\n # Build supporting operations\r\n with tf.variable_scope('savers'):\r\n self.checkpoint.build_savers() # Create savers\r\n if training:\r\n with tf.variable_scope('optimize'):\r\n self._build_optimizers()\r\n\r\n # Start pre-processing routines\r\n for _, datasource in self._train_data.items():\r\n datasource.create_and_start_threads()\r\n\r\n # Initialize all variables\r\n self._tensorflow_session.run(tf.global_variables_initializer())\r\n self._initialized = True", "def train_network(self):\n batch = self.memory.sample(self.batch_size)\n inputs = np.array([b[\"state\"] for b in batch]) #####\n actions = np.array([b[\"action\"] for b in batch])\n rewards = np.array([b[\"reward\"] for b in 
batch])\n next_inputs = np.array([b[\"next_state\"] for b in batch])\n\n actions_one_hot = np.eye(self.action_space_size)[actions]\n\n next_qvalues = np.squeeze(self.target_network.model(next_inputs))\n targets = rewards + self.discount * np.amax(next_qvalues, axis=-1)\n\n self.online_network.train_step(inputs, targets, actions_one_hot)", "def training_phase(self):\r\n self.train_dataloader = self.get_dataloader(\r\n hdf_path=self.train_h5_path,\r\n data_description=\"training set\"\r\n )\r\n self.valid_dataloader = self.get_dataloader(\r\n hdf_path=self.valid_h5_path,\r\n data_description=\"validation set\"\r\n )\r\n\r\n self.get_ts_properties()\r\n\r\n self.initialize_output_files()\r\n\r\n start_epoch, end_epoch = self.define_model_and_optimizer()\r\n\r\n print(\"* Beginning training.\", flush=True)\r\n n_processed_batches = 0\r\n for epoch in range(start_epoch, end_epoch):\r\n\r\n self.current_epoch = epoch\r\n n_processed_batches = self.train_epoch(n_processed_batches=n_processed_batches)\r\n\r\n # evaluate model every `sample_every` epochs (not every epoch)\r\n if epoch % self.C.sample_every == 0:\r\n self.evaluate_model()\r\n else:\r\n util.write_model_status(score=\"NA\") # score not computed\r\n\r\n self.print_time_elapsed()", "def train_setup(additional_arg_parser=None, args=None):\n if args is None:\n args = parse_input_arguments(additional_arg_parser)\n if args.do_eval or args.do_test:\n args.load_pretrained = True\n if args.load_pretrained and args.pretrained_checkpoint == '':\n raise ValueError('Must provide --pretrained_checkpoint when using --load_pretrained')\n if args.eval_batch_size == 0:\n args.eval_batch_size = args.train_batch_size\n if args.load_pretrained:\n args.save_dir = \"/\".join(args.pretrained_checkpoint.split('/')[:-1])\n else:\n args.save_dir = get_save_dir(args.save_dir, args.run_name)\n if not os.path.exists(args.save_dir):\n os.makedirs(args.save_dir)\n args.start_epoch = 0\n args.start_step = 0\n\n split_name = 'train' if args.do_train else 'validation' if args.do_eval else 'test'\n logger = get_logger(args.save_dir, 'log_train')\n\n logger.info(\"local_rank: %d, node_index: %d, gpu_per_node: %d\"%(args.local_rank, args.node_index, args.gpu_per_node))\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend='nccl')\n args.local_rank += args.node_index * args.gpu_per_node\n args.n_gpu = 1\n args.device = device\n\n logger.info(\"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s, world size: %s\",\n args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16,\n torch.distributed.get_world_size() if args.local_rank != -1 else 1)\n\n set_seed(args)\n\n return args, logger", "def train():\n if os.path.isfile(load_model):\n all_weights = np.load(load_model) \n else:\n print(\"Model file does not exist. 
Exiting....\")\n return\n\n print(\"Build up the network\")\n\n\n # Two different types of input\n image_input_var = T.tensor4('original_inputs')\n rotated_image_input_var = T.tensor4('rotated_image_input')\n target_var = T.ivector('targets')\n\n # Build teacher network\n cnn_model, cnn_mid_output, weight_decay_penalty = cifar10_merge.build_cnn(image_input_var)\n\n # Get the intermediate layer of the teacher network\n original_model_mid_output = lasagne.layers.get_output(cnn_mid_output, image_input_var, deterministic = True)\n\n # Get the softmax output of the teacher network.\n\n original_model_output_val = lasagne.layers.get_output(cnn_model, image_input_var, deterministic = True)\n \n # Build the student network\n \n rotated_cnn_model, rotated_model_mid, rotated_weight_penalty = \\\n cifar10_merge.build_cnn(rotated_image_input_var)\n \n # Get the softmax output of the student network. Since it need to be trained on, deterministic = False\n rotated_model_mid_output = lasagne.layers.get_output(rotated_model_mid, rotated_image_input_var, deterministic = False)\n\n # Get the model output of the studenet network.\n rotated_model_output = lasagne.layers.get_output(rotated_cnn_model, rotated_image_input_var, deterministic = True)\n\n # Set the weights for the teacher network\n lasagne.layers.set_all_param_values(cnn_model, all_weights)\n\n # Get the initialized weights below the intermediate layer\n rotated_net_weights_below_mid = lasagne.layers.get_all_param_values(rotated_model_mid)\n\n # Get the parameter of the student network that needs to be trained.\n rotated_net_training_param = lasagne.layers.get_all_params(rotated_model_mid, trainable=True)\n\n # Set the weights for the student network\n lasagne.layers.set_all_param_values(rotated_cnn_model, all_weights)\n\n lasagne.layers.set_all_param_values(rotated_model_mid,\n rotated_net_weights_below_mid)\n \n # cross_entropy_loss = lasagne.objectives.categorical_crossentropy(rotated_model_mid_output, target_var)\n\n # cross_entropy_loss_mean = cross_entropy_loss.mean()\n\n # L = T.mean(lasagne.objectives.squared_error(original_model_mid_output, rotated_model_mid_output), axis = 1)\n L = lasagne.objectives.squared_error(original_model_mid_output, rotated_model_mid_output).mean()\n # cost = T.mean(L)\n\n # cost = cross_entropy_loss_mean\n cost = L\n\n # updates = lasagne.updates.adagrad(cost, rotated_net_training_param, learning_rate=0.1)\n updates = lasagne.updates.adam(cost, rotated_net_training_param, learning_rate=0.001)\n\n # cross_entropy_loss = lasagne.objectives.categorical_crossentropy(model_output, target_var)\n\n # cross_entropy_loss_mean = cross_entropy_loss.mean()\n\n # loss = cross_entropy_loss_mean + weight_decay_penalty\n\n\n train_acc = T.mean(T.eq(T.argmax(rotated_model_output, axis = 1), target_var),\n dtype=theano.config.floatX)\n\n original_model_acc = T.mean(T.eq(T.argmax(original_model_output_val, axis = 1), target_var),\n dtype=theano.config.floatX)\n\n train_fn = theano.function(inputs = [image_input_var, rotated_image_input_var, target_var],\n outputs = [original_model_mid_output, rotated_model_mid_output, train_acc], updates = updates)\n\n # Return the accuracy for teacher network and student network, respectively\n val_fn = theano.function(inputs = [image_input_var, rotated_image_input_var, target_var],\n outputs = [original_model_acc, train_acc])\n\n if os.path.isfile(os.path.join(train_dir, 'latest_model.txt')):\n weight_file = \"\"\n with open(os.path.join(train_dir, 'latest_model.txt'), 'r') as checkpoint_file:\n 
weight_file = checkpoint_file.read().replace('\\n', '')\n print(\"Loading from: \", weight_file)\n model_weights = np.load(weight_file)\n lasagne.layers.set_all_param_values(rotated_cnn_model, model_weights)\n\n # Get images and labels for CIFAR-10.\n\n cifar10_data = cifar10_merge_input.load_cifar10()\n\n bkgimg = np.array([np.mean(cifar10_data.train.images[cifar10_data.train.labels==i], axis = 0) for i in range(10)])\n for epoch in xrange(max_steps):\n start_time = time.time()\n\n original_test_image, rotated_test_image, test_label = cifar10_data.test.next_eval_batch(batch_size)\n total_t_net_for_original = 0\n total_s_net_for_original = 0\n total_t_net_for_rotation = 0\n total_s_net_for_rotation = 0\n total_count = 0\n\n print(\"Start Evaluating\")\n\n while(rotated_test_image is not None):\n t_net_for_original, s_net_for_original = val_fn(original_test_image, original_test_image, test_label)\n total_t_net_for_original += t_net_for_original * original_test_image.shape[0]\n total_s_net_for_original += s_net_for_original * original_test_image.shape[0]\n\n t_net_for_rotated, s_net_for_rotated = val_fn(rotated_test_image, rotated_test_image, test_label)\n total_t_net_for_rotation += t_net_for_rotated * rotated_test_image.shape[0]\n total_s_net_for_rotation += s_net_for_rotated * rotated_test_image.shape[0]\n\n total_count += rotated_test_image.shape[0]\n original_test_image, rotated_test_image, test_label = cifar10_data.test.next_eval_batch(batch_size)\n \n print(\"Student Network Accuracy on Original Image: %.4f\" % (float(total_s_net_for_original / total_count)))\n print(\"Teacher Network Accuracy on Original Image: %.4f\" % (float(total_t_net_for_original / total_count)))\n\n print(\"Student Network Accuracy on Rotated Image: %.4f\" % (float(total_s_net_for_rotation / total_count)))\n print(\"Teacher Network Accuracy on Rotated Image: %.4f\" % (float(total_t_net_for_rotation / total_count)))\n\n\n print(\"Start Training...\")\n original_train_image, rotated_train_image, train_label, start = cifar10_data.train.next_batch(batch_size)\n original_train_image = generate(original_train_image, train_label, bkgimg, 16, 32, None, batch_size)\n # rotated_train_image = random_rotated_image(original_train_image[::-1])\n rotated_train_image = random_rotated_image(original_train_image)\n\n end_time_1 = time.time() - start_time\n step = 1\n loss_total = 0\n original_start = start\n\n while(start != 0):\n #loss_value, train_acc = train_fn(original_train_image, rotated_train_image, train_label)\n \n ori_mid, rot_mid, train_acc = train_fn(original_train_image, rotated_train_image, train_label)\n # ori_mid, rot_mid, train_acc = train_fn(original_train_image, np.array(np.random.rand(batch_size, 3, 32, 32), dtype = np.float32), train_label)\n step += 1\n if start == original_start:\n print(ori_mid[0])\n print(rot_mid[0])\n print(train_label)\n \n original_train_image, rotated_train_image, train_label, start = cifar10_data.train.next_batch(batch_size)\n original_train_image = generate(original_train_image, train_label, bkgimg, 16, 32, None, batch_size)\n rotated_train_image = random_rotated_image(original_train_image)\n # assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n # loss_total += loss_value\n if 1:\n if epoch % 100 == 0 or (step + 1) == max_steps:\n checkpoint_path = os.path.join(train_dir, 'model_step%d.npy' % epoch)\n weightsOfParams = lasagne.layers.get_all_param_values(rotated_cnn_model)\n np.save(checkpoint_path, weightsOfParams)\n latest_model_path = os.path.join(train_dir, 
'latest_model.txt')\n try:\n os.remove(latest_model_path)\n except OSError:\n pass\n latest_model_file = open(latest_model_path, \"w\")\n latest_model_file.write(checkpoint_path)\n latest_model_file.close()\n\n # print(\"Epoch Stop, loss_averge\", float(loss_total) / float(step))\n duration = time.time() - start_time\n print(\"Duration is\", duration)", "def setupNetwork(self):\n\t\tin_layer = Input(shape=(28, ))\n\t\td1 = Dense(40, activation='relu')(in_layer)\n\t\td2 = Dense(10, activation='relu')(d1)\n\t\tout = Dense(1, activation='sigmoid')(d2)\n\n\t\tself.model = tf.keras.Model(inputs=in_layer, outputs=out)", "def setup_net(self):\n pass", "def initialise_weights(self): \n \n def initialise_process(param):\n \n \"\"\"\n Initialises weights of a given parameter following either Xavier or Kaiming uniform or normal processes.\n \n : param (torch.Tensor):\n \n \"\"\"\n \n if self._initialisation_process == 'xavier_uniform':\n tnni.xavier_uniform_(param.data)\n elif self._initialisation_process == 'xavier_normal':\n tnni.xavier_normal_(param.data)\n elif self._initialisation_process == 'kaiming_uniform':\n tnni.kaiming_uniform_(param.data)\n elif self._initialisation_process == 'kaiming_normal':\n tnni.kaiming_normal_(param.data)\n \n if self._initialisation_process is not None:\n for m in self.modules():\n # Embedding\n if type(m) is nn.Embedding:\n tnni.normal_(self.embedding.weight)\n # RNN\n elif type(m) in [nn.GRU, nn.LSTM, nn.RNN]: \n for name, param in m.named_parameters():\n if 'weight_ih' in name:\n initialise_process(param)\n #torch.nn.init.kaiming_normal_(param.data)\n elif 'weight_hh' in name:\n tnni.orthogonal_(param.data)\n elif 'bias' in name:\n # Bias initialised with zero will get the bias from\n # the forget gate\n param.data.fill_(0.0)\n param.data[self._hidden_size:self.directions*self._hidden_size].fill_(1.0)\n # Attention linear layer\n elif type(m) is nn.Linear:\n for name, param in m.named_parameters():\n if 'weight' in name:\n initialise_process(param.data)\n elif 'bias' in name:\n param.data.normal_()", "def setup_training(self):\n \n training_batch_size = self.mini_batch_size\n \n cost = self.cnn.get_default_cost()\n \n data_specs = cost.get_data_specs(self.cnn)\n mapping = DataSpecsMapping(data_specs)\n space_tuple = mapping.flatten(data_specs[0], return_tuple=True)\n source_tuple = mapping.flatten(data_specs[1], return_tuple=True)\n \n theano_args = []\n for space, source in safe_zip(space_tuple, source_tuple):\n name = '%s[%s]' % (self.__class__.__name__, source)\n arg = space.make_theano_batch(name=name,\n batch_size=training_batch_size).astype(\"float32\")\n theano_args.append(arg)\n theano_args = tuple(theano_args)\n \n y_hat = self.cnn.fprop(theano_args[0])\n \n self.fprop_func = theano.function([theano_args[0]], y_hat)\n \n cost = self.cnn.cost(theano_args[1], y_hat)\n \n lr_scalers = self.cnn.get_lr_scalers()\n \n params = list(self.cnn.get_params())\n grads = T.grad(cost, params, disconnected_inputs='ignore')\n \n gradients = OrderedDict(izip(params, grads))\n \n rms_vals_dict = OrderedDict(izip(params, self.rms_vals))\n \n updates = OrderedDict()\n \n updates.update(dict(safe_zip(params, [param - self.learning_rate * \n (gradients[param] / \n T.sqrt(rms_vals_dict[param] + 1e-8)) \n for param in params])))\n \n rmsprop_updates = OrderedDict()\n \n rmsprop_updates.update(dict(safe_zip(self.rms_vals, [(rms_vals_dict[param] * .9) + \n (T.sqr(gradients[param]) * .1)\n for param in params])))\n \n self.training = theano.function(theano_args, updates=updates, 
\n on_unused_input='ignore')\n \n self.rmsprop_update = theano.function(theano_args, updates=rmsprop_updates,\n on_unused_input='ignore')\n \n temp = T.tensor4()\n \n self.dimshuf_func = theano.function([temp], temp.dimshuffle(1, 2, 3, 0))\n \n #self.grads_func = theano.function(theano_args, grads)\n \n self.cost_function = theano.function(theano_args, cost)", "def __init__(self, layerNeurons, numberOfLayers, initialWeights = None, lowerBound = None, upperBound = None):\r\n \r\n # Ensure that there is at-least one input and one output layer in the network\r\n assert len(layerNeurons) > 1, \"At least one input layer and one output layer is needed\"\r\n \r\n # Get the total number of weights needed in the network\r\n totalWeightCount = NeuralNetwork.getSizeOfWeightVector(layerNeurons)*numberOfLayers\r\n \r\n # Initialise the weights with the initialiser or random values\r\n if initialWeights is None:\r\n if lowerBound is None:\r\n lowerBound=-1/np.sqrt(layerNeurons[0])\r\n if upperBound is None:\r\n upperBound=1/np.sqrt(layerNeurons[0])\r\n self.weights = np.random.uniform(lowerBound, upperBound, totalWeightCount)\r\n else:\r\n assert initialWeights.size == totalWeightCount, (\"Length of initial weight matrix incorrect. You need \"+str(totalWeightCount)+\" weights\")\r\n self.weights = initialWeights.view()\r\n \r\n self.weights.shape = (numberOfLayers, -1)\r\n # create an empty array of layers\r\n self.layers = []\r\n layerBlockStart = 0\r\n \r\n for layerInputDimention, layerOutputDimention in zip(layerNeurons, layerNeurons[1:]):\r\n # initialise each layer with its input and output dimentions and bi-directional pointers to the relivant weights\r\n layerBlockEnd = layerBlockStart+(layerInputDimention*layerOutputDimention)\r\n layerBiasEnd = layerBlockEnd+layerOutputDimention\r\n newLayer = batchNetworkLayer(layerInputDimention, layerOutputDimention, numberOfLayers, \r\n self.weights[..., :, layerBlockStart:layerBlockEnd], \r\n self.weights[..., :, layerBlockEnd:layerBiasEnd])\r\n self.layers.append(newLayer)\r\n \r\n layerBlockStart = layerBiasEnd", "def train(self):\n self.training = True", "def initialise_parameters(self):\n # Weights\n init = select_w_init(self.w_init)\n if self.w_gain:\n gain = nn.init.calculate_gain('relu')\n init(self.relations, gain=gain)\n else:\n init(self.relations)\n\n # Biases\n if self.b_init:\n init = select_b_init(self.b_init)\n init(self.sbias)\n init(self.pbias)\n init(self.obias)", "def train(self):\n\n # Ensure everything is sent to GPU if being trained on the cloud\n if self.local == False:\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n print(\"\\n \\n EVERYTHING TO CUDA \\n \\n\")\n\n # Load weights if applicable\n if self.load_weights == True:\n start_epoch, loss = self.load_checkpoint(\n self.model, self.optimizer, self.model_name\n )\n start_epoch += 1\n print(\"\\n \\n [WEIGHTS LOADED]\")\n else:\n start_epoch = 0\n\n # Start Training Loop\n for epoch in range(start_epoch, self.epochs + 1):\n\n # TRAIN\n if epoch > 0:\n\n # Set model to training mode\n self.model.train()\n\n # Initialize loss and counter that will allow model weights to be saved and overwritten every 10 minibatches\n train_loss = 0\n counter = 0\n\n # Iterate through train set\n for (\n image1,\n image2,\n annotation1,\n annotation2,\n landmark1,\n landmark2,\n ) in tqdm(self.train_loader, desc=\"Train Epoch \" + str(epoch)):\n\n # image tensors and bounding box and label tensors to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward 
pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss from one pass and append to training loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n train_loss += loss.item()\n\n # Clear optimizer gradient\n self.optimizer.zero_grad()\n\n # Backprop\n loss.backward()\n\n # Take a step with optimizer\n self.optimizer.step()\n\n # Save/overwrite model weights every 10 minibatches\n if counter % 10 == 0:\n self.save_checkpoint(\n self.model,\n self.optimizer,\n self.model_name,\n epoch,\n train_loss,\n )\n\n print(\n f\"====> Epoch: {epoch} Average train loss: {train_loss / len(self.train_loader.dataset):.4f}\\n\"\n )\n\n # Save entire model as .pt after every epoch of training\n if self.local == True:\n torch.save(\n self.model,\n os.path.join(self.save_path, self.model_name + \".pt\"),\n )\n else:\n torch.save(\n self.model, self.model_name + \"_epoch\" + str(epoch) + \".pt\"\n )\n print(\"SAVED MODEL EPOCH \" + str(epoch))\n\n # Evaluate on Validation Set after each epoch\n with torch.no_grad():\n\n # Set model to evaluation mode\n self.model.eval()\n\n # Iterate through validation set\n for image1, image2, annotation1, annotation2 in tqdm(\n self.val_loader, desc=\"Validation Epoch \" + str(epoch)\n ):\n\n # Initialize validation loss\n val_loss = 0\n\n # Send images to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss and append to validation loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n val_loss += loss\n\n print(\n f\"====> Epoch: {epoch} Average test loss: {val_loss / len(self.val_loader.dataset):.4f}\\n\"\n )\n\n print(\"[DONE EPOCH{}]\".format(epoch))\n\n print(\"[DONE TRAINING]\")\n\n # Save model after all epochs are finished\n if self.local == True:\n torch.save(\n self.model, os.path.join(self.save_path, self.model_name + \".pt\")\n )\n else:\n torch.save(self.model, self.model_name + \".pt\")", "def initialize_network(self, model, num_init=None, **net_args):\n\n self.net_args = net_args\n\n if num_init is None:\n self.num_init = 1\n else:\n self.num_init = num_init\n\n nets = []\n for i in range(self.num_init):\n nets.append( model(dim_inp=self.dim_inp, \n dim_out=self.dim_out, **net_args) )\n\n return nets", "def __init__(self, layerNeurons, initialWeights = None, layerTypes=None, **kwargs):\r\n \r\n # Ensure that there is at-least one input and one output layer in the network\r\n assert len(layerNeurons)>1, \"At least one input layer and one output layer is needed\"\r\n \r\n # Get the total number of weights needed in the network\r\n totalWeightCount = NeuralNetwork.getSizeOfWeightVector(layerNeurons)\r\n \r\n # Initialise the weights with the initializer or random values\r\n if initialWeights is None:\r\n self.weights = np.random.uniform(-1/np.sqrt(layerNeurons[0]), 1/np.sqrt(layerNeurons[0]), totalWeightCount)\r\n else:\r\n assert len(initialWeights) == totalWeightCount, (\"Length of initial weight matrix incorrect. 
You need \"+str(totalWeightCount)+\" weights\")\r\n self.weights = np.array(initialWeights, dtype = np.float64) \r\n \r\n # create an empty array of layers\r\n self.layers = []\r\n layerBlockStart = 0\r\n \r\n if layerTypes is None or len(layerTypes)<(len(layerNeurons)-1):\r\n layerTypes=[NetworkLayer]*(len(layerNeurons)-1)\r\n \r\n for layerInputDimention, layerOutputDimention, layerType in zip(layerNeurons, layerNeurons[1:], layerTypes):\r\n # initialise each layer with its input and output dimentions and bi-directional pointers to the relivant weights\r\n layerBlockEnd = layerBlockStart+(layerInputDimention*layerOutputDimention)\r\n layerBiasEnd = layerBlockEnd+layerOutputDimention\r\n newLayer = layerType(layerInputDimention, layerOutputDimention, \r\n self.weights[..., layerBlockStart:layerBlockEnd], \r\n self.weights[..., layerBlockEnd:layerBiasEnd], **kwargs)\r\n self.layers.append(newLayer)\r\n \r\n layerBlockStart = layerBiasEnd\r\n \r\n # Tell the output later to use a different function to calculate the delta \r\n newLayer.calcDelta = newLayer.calcDeltaOutputLayer", "def initialize(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n nn.init.xavier_normal_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)", "def train():\n import training\n\n # Ensure output directories exist\n os.makedirs(os.path.dirname(cfg.scaler_path), exist_ok=True)\n os.makedirs(cfg.model_path, exist_ok=True)\n os.makedirs(cfg.log_path, exist_ok=True)\n\n # Load (standardized) input data and target values\n tr_x, tr_y, _ = _load_data(cfg.training_set, is_training=True)\n val_x, val_y, _ = _load_data(cfg.validation_set)\n\n # Try to create reproducible results\n np.random.seed(cfg.initial_seed)\n\n # Save free parameters to disk\n utils.log_parameters(cfg.training, os.path.join(cfg.model_path,\n 'parameters.json'))\n\n training.train(tr_x, tr_y, val_x, val_y)", "def _train(self, dataset):\n net = buildNetwork(\n dataset.params_length,\n self._default_hidden_layers,\n 1 # a binary classifier only requires one output layer\n )\n ds = SupervisedDataSet(dataset)\n trainer = BackpropTrainer(net, ds)\n trainer.trainUntilConvergence()\n net.activate(params.as_serialized)", "def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.cross_attn = MultiheadAttention(**self.cross_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(3)\n ]\n self.norms = ModuleList(norms_list)", "def initialize_setup(self, init_lr):\n param_list = []\n for name, param in self.model.named_parameters():\n if param.requires_grad:\n param_list.append(param)\n\n self.optimizer = torch.optim.AdamW(\n param_list, lr=init_lr, eps=1e-6)\n\n self.optim_scheduler = get_linear_schedule_with_warmup(\n self.optimizer, num_warmup_steps=0,\n num_training_steps=len(self.train_examples) * self.max_epochs)\n\n if not path.exists(self.model_path):\n torch.manual_seed(self.seed)\n np.random.seed(self.seed)\n # Try to initialize the mention model part\n if path.exists(self.pretrained_mention_model):\n print(\"Found pretrained model!!\")\n checkpoint = torch.load(self.pretrained_mention_model)\n self.model.load_state_dict(checkpoint['model'], strict=False)\n else:\n logger.info('Loading previous model: %s' % self.model_path)\n # Load 
model\n self.load_model(self.model_path)", "def __init__(self, num_learners: int):\n self.num_learners = num_learners\n self.learners = []\n self.learner_weights = np.ones(num_learners)", "def __init__(self, num_models: int, num_classes: int):\n self.nun_models = num_models\n self.num_classes = num_classes\n self.model: keras.Model = self.init_model()", "def __init__(self):\n self.weights = None\n self._epsilon = None\n self._num_training = None\n self._lambda = None\n return None", "def initialize(self): \n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=config.LR)", "def start_training(self):\n if self.task_env is None:\n rospy.logfatal(\"No task environment found for training.\")\n if self.agent is None:\n rospy.logfatal(\"No agent found for training.\")\n self.agent.start_training()", "def __init__(self,layers,activations):\n model = utils.buildMLP(layers, activations)\n super().__init__(torch.nn.Sequential(model), nnType='dnn')", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def train_init():\n np.random.seed(seed)\n tf.random.set_random_seed(seed)\n random.seed(seed)\n\n name = str(seed)\n desc = MNMDescriptor(5, inp_dict, outp_dict, name=name)\n desc = recursive_creator(desc, 0, 0, seed)\n hypers = {}\n for hyper in hyps:\n hypers[hyper] = np.random.choice(hyps[hyper])\n\n model = MNM(desc, hypers[\"btch_sz\"], data_inputs[\"Train\"], data_outputs[\"Train\"], loss_func_weights={\"o0\": hypers[\"wo0\"], \"o1\": hypers[\"wo1\"], \"o2\": hypers[\"wo2\"]}, name=name, lr=hypers[\"lr\"], opt=hypers[\"opt\"], random_seed=seed)\n if intelligent_training == 2:\n loss_weights = model.sequential_training(hypers[\"btch_sz\"], iter_lim // 50, conv_param, proportion, iter_lim, display_step=-1)\n else:\n loss_weights = model.autoset_training(hypers[\"btch_sz\"], iter_lim//50, conv_param, proportion, iter_lim, display_step=-1, incr=incr, decr=decr, scaling=scale)\n\n\n # ####### Save model characteristics.\n\n model.descriptor.save(path=\"\")\n model.save_weights(path=\"\")\n\n results = evaluate_model(model)\n\n np.save(\"hypers\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", hypers)\n\n np.save(\"orig_results\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", results)\n\n np.save(\"loss_weights\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", loss_weights)", "def __init__(self, hidden_layer_sizes, activation='relu', reg=0.001, k_fold=5, random_state=0):\n print(\"Initialize model Multi-layer Perceptron\")\n self.hidden_layer_sizes = hidden_layer_sizes\n self.activation = activation\n self.reg = reg\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.neural_network.MLPClassifier(self.hidden_layer_sizes,\n activation=self.activation,\n alpha=self.reg, max_iter=1000, \n random_state=self.random_state)", "def train(self):\r\n raw_dataset = pd.read_csv(self.datafile, sep = ',', header = 0,\r\n na_values = '?', comment = '\\t',\r\n skipinitialspace = True)\r\n\r\n dataset = raw_dataset.copy()\r\n dataset.tail()\r\n\r\n # Clear unknown values\r\n dataset.isna().sum()\r\n dataset = dataset.dropna()\r\n\r\n # takes a sample of 80% of the data points\r\n train_dataset = dataset.sample(frac = 0.8, random_state = 0)\r\n test_dataset = dataset.drop(train_dataset.index)\r\n\r\n # Split features from labels for training and test datasets\r\n train_features = train_dataset.copy()\r\n 
test_features = test_dataset.copy()\r\n train_labels = train_features.pop('Quality')\r\n test_labels = test_features.pop('Quality')\r\n\r\n # normalize data\r\n normalizer = preprocessing.Normalization()\r\n normalizer.adapt(np.array(train_features))\r\n\r\n # builds the model\r\n def build_and_compile_model(norm):\r\n model = keras.Sequential([\r\n norm,\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(1)\r\n ])\r\n\r\n model.compile(loss='mean_absolute_error',\r\n optimizer=tf.keras.optimizers.Adam(0.001))\r\n return model\r\n\r\n deep_neural_network_model = build_and_compile_model(normalizer)\r\n\r\n history = deep_neural_network_model.fit(\r\n train_features, train_labels,\r\n validation_split=0.2,\r\n verbose=0, epochs=100)\r\n\r\n deep_neural_network_model.save('deep_neural_network_model')", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def init(self):\n inputs = self.inputs()\n outputs = self.outputs(inputs)\n self.model = tf.keras.Model(inputs=inputs, outputs=outputs)\n self.model.compile(optimizer=self.optimizer() or self.config.get('optimizer'),\n loss=self.loss() or None,\n metrics=self.metrics() or None,\n loss_weights=self.loss_weights() or None,\n weighted_metrics=self.weighted_metrics() or None,\n target_tensors=self.target_tensors() or None)\n if self.config.get('debug'):\n self.model.summary()", "def _set_train(self):\n\n if not self.model.__dict__['training']:\n self.model.train()", "def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])", "def new(self):\n self.define_layers()\n self.model = nn.Sequential(*self.layers)\n self.model.cuda()\n self.model = orthogonal_init(self.model)\n\n # Re-count N\n self.count_params()", "def set_train(self):\n self.model.train()" ]
[ "0.7842906", "0.769013", "0.75414234", "0.74997467", "0.7447813", "0.7415171", "0.73304015", "0.73236835", "0.7239312", "0.7162911", "0.71530294", "0.7108662", "0.706298", "0.70624375", "0.70430005", "0.70430005", "0.70291543", "0.70126384", "0.6987266", "0.69770414", "0.6969593", "0.68761635", "0.687224", "0.6855854", "0.6811921", "0.68066216", "0.6805928", "0.68021816", "0.6782644", "0.67764896", "0.6773019", "0.677077", "0.6763655", "0.67632014", "0.67602026", "0.6753927", "0.67388356", "0.67376924", "0.67376924", "0.67376924", "0.67376924", "0.67376924", "0.67317986", "0.67260337", "0.672543", "0.67224175", "0.67173034", "0.6705868", "0.67025787", "0.66994655", "0.66895705", "0.6689539", "0.66879636", "0.6682377", "0.6671328", "0.6638996", "0.66144764", "0.66033393", "0.6594238", "0.65786666", "0.65765905", "0.65756136", "0.65527236", "0.65475434", "0.6545411", "0.654352", "0.65357476", "0.6534177", "0.6530656", "0.6530266", "0.65255916", "0.65164715", "0.6510631", "0.65099245", "0.64985466", "0.6498203", "0.64859813", "0.6475989", "0.64746785", "0.64724934", "0.6467403", "0.64648426", "0.64647025", "0.6459306", "0.64585286", "0.64582413", "0.64578664", "0.6455184", "0.6454412", "0.6452216", "0.6450752", "0.6447673", "0.6447673", "0.6447673", "0.6447673", "0.6447673", "0.644539", "0.64449185", "0.64311373", "0.64288723", "0.64288527" ]
0.0
-1
A subfunction that ensures the train mode.
def _set_train(self):

        if not self.model.__dict__['training']:
            self.model.train()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_training(self):\n return self.mode == \"train\"", "def is_training(self):\n return self.mode == \"train\"", "def train():\n pass", "def test(self):\n self.training = False", "def train(self):\n self.training = True", "def train(self)->None:", "def train(self, mode: bool = True):\n if self.nn_module.training != mode:\n self.nn_module.train(mode)", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def test_training(self):\n\t\tpass", "def train(self):\n\t\traise NotImplementedError", "def train():\n # YOUR TRAINING CODE GOES HERE", "def train_step(self):\n pass", "def set_mode_train(self):\n self._set_mode('train')\n return self", "def set_train_mode(training, mnet, hnet, hhnet, dis):\n for net in [mnet, hnet, hhnet, dis]:\n if net is not None:\n if training:\n net.train()\n else:\n net.eval()", "def train(self):\n return", "def train(self, mode=True):\n super(SwinTransformer, self).train(mode)\n self._freeze_stages()", "def train(self):\n raise NotImplementedError", "def train_one_epoch(self):\n raise NotImplementedError", "def train(self, ):\n raise NotImplementedError", "def train(self):\n raise NotImplementedError()", "def trainModel( self, featureTrain, classTrain):", "def model_switch_to_training(self):\n pass", "def training(self):\n self.training = True", "def TrainOneStep(self):\n pass", "def trainable(self):\n return True", "def _training_before_hook(self):\n pass", "def train_mode(self, loss_D, loss_G):\n \"\"\"epslon = 1e-5 to avoid loss = 0 \"\"\"\n \"\"\"#@(chuanzi): not loss = 0 in original occasion\"\"\"\n ratio = loss_D.data[0]/(loss_G.data[0] + 1e-5)\n if ratio < 1e-1 and self.train_D:\n self.train_D = False\n self.train_G = True\n if ratio > 5e-1 and not self.train_D:\n self.train_D = True\n self.train_G = True\n if ratio > 1e-1 and self.train_G:\n self.train_G = False\n self.train_D = True\n print ( \"train_D=%d, train_G=%d\" % (self.train_D, self.train_G))", "def eval(self):\n self.train(mode=False)", "def train(self, mode: bool = True) -> None:\n super().train(mode=mode)\n if mode:\n self.mean_module = None\n self.covar_module = None\n self.likelihood = None\n self.task_covar_module = None", "def evaluate(self):\n self.training = False", "def start_training(self):\n self.training = True", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def set_train(self):\n self.train()\n self.volatile = False", "def train_naive(): # add arguments as needed\n pass", "def train(self, training_steps=10):", "def is_trainable(self):\n return False", "def train(self) -> Any:\n pass", "def _precall(self, testdataset, trainingdataset=None):\n if not trainingdataset is None:\n if self.__train:\n # XXX can be pretty annoying if triggered inside an algorithm\n # where it cannot be switched of, but retraining might be\n # intended or at least not avoidable.\n # Additonally isTrained docs say:\n # MUST BE USED WITH CARE IF EVER\n #\n # switching it off for now\n #if self.__clf.isTrained(trainingdataset):\n # warning('It seems that classifier %s was already trained' %\n # self.__clf + ' on dataset %s. 
Please inspect' \\\n # % trainingdataset)\n if self.states.isEnabled('training_confusion'):\n self.__clf.states._changeTemporarily(\n enable_states=['training_confusion'])\n self.__clf.train(trainingdataset)\n if self.states.isEnabled('training_confusion'):\n self.training_confusion = self.__clf.training_confusion\n self.__clf.states._resetEnabledTemporarily()\n\n if self.__clf.states.isEnabled('trained_labels') and \\\n not testdataset is None:\n newlabels = Set(testdataset.uniquelabels) \\\n - Set(self.__clf.trained_labels)\n if len(newlabels)>0:\n warning(\"Classifier %s wasn't trained to classify labels %s\" %\n (`self.__clf`, `newlabels`) +\n \" present in testing dataset. Make sure that you have\" +\n \" not mixed order/names of the arguments anywhere\")\n\n ### Here checking for if it was trained... might be a cause of trouble\n # XXX disabled since it is unreliable.. just rely on explicit\n # self.__train\n # if not self.__clf.isTrained(trainingdataset):\n # self.__clf.train(trainingdataset)\n # elif __debug__:\n # debug('CERR',\n # 'Not training classifier %s since it was ' % `self.__clf`\n # + ' already trained on dataset %s' % `trainingdataset`)", "def train(self, batch_training=False):\n raise NotImplementedError", "def train(self, mode=True):\n super(Encoder, self).train(mode)\n self.apply(freeze_batchnorm)", "def set_train(self):\n self.train()\n self.volatile = False\n self.scheduled_sampling = self.sample_prob != 0", "def __validate__(self):\n if self.train:\n assert self.random is not None", "def train(self, trainData):\n pass", "def finetuned():\n launch_training_on_all_splits(experiment='full', splits=NAME_SPLIT, base_model='ft', dropout=0.7304, learning_rate=0.0000976)", "def train(self, batch):\n pass", "def train(self, training_data):\n pass", "def train(self, mode: bool = True):\n T = super().train(mode=mode)\n if mode:\n self.graph_construction()\n return T", "def train(self):\n raise NotImplemented()", "def init(self, train):\n return", "def init(self, train):\n return", "def init(self, train):\n return", "def init(self, train):\n return", "def init(self, train):\n return", "def test_diff_trainability(self):\n self.run_subtests(\n {\n \"multi_tensor\": [False, True],\n \"sharding_strategy\": [\n ShardingStrategy.FULL_SHARD,\n ShardingStrategy.SHARD_GRAD_OP,\n ShardingStrategy.NO_SHARD,\n ],\n },\n self._test_diff_trainability,\n )", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def trainNet():", "def before_fit(self):\n self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, \"gather_preds\")\n if not self.run:\n return\n\n # Prepare ground truth container, set here as y_true's always stay the same\n self.y_true = []", "def _setup_recursive_train_modes(self, batch_type):\n mode = 'training'\n # If each component is in encoding mode\n if all(v == 'encoding' for v in batch_type.values()):\n mode = 'inference'\n return mode", "def _check_mode_valid(mode):\n if (mode != model_fn.ModeKeys.TRAIN and mode != model_fn.ModeKeys.INFER and\n mode != model_fn.ModeKeys.EVAL):\n raise ValueError(\"mode=%s unrecognized.\" % str(mode))", "def train(self, x={}, **kwargs):\n return 0", "def train(self, trainfile):", "def trainSession(self, session):\n pass", "def nonlearning():\n\taT.featureAndTrain(['../../AudioData/chunked_data_sorted/pos', 
'../../AudioData/chunked_data_sorted/neg'], \n\t\t\t\t\t\t1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, \n \"svm\", \"emotion_classifier\", True)", "def train(self, train_loader):\n pass", "def train(self, num_batches: int):", "def _set_learning_phase(self, train: bool = False):\n if train:\n self.net_q.train()\n self.net_ps.train()\n self.net_k.train()\n else:\n self.net_q.eval()\n self.net_ps.eval()\n self.net_k.eval()", "def set_trainable(model, train):\r\n model.trainable = train\r\n for l in model.layers:\r\n l.trainable = train", "def train(self, training_data, cfg, **kwargs):\n pass", "def train_model(self):\n self.best_epoch = {'auto':{}, 'coffee':{}, 'movie':{}, 'pizza':{}, 'restaurant':{}, 'uber':{} }\n self.best_f1 = {'auto':{}, 'coffee':{}, 'movie':{}, 'pizza':{}, 'restaurant':{}, 'uber':{} }\n for t in self.topic:\n if t != 'other':\n for st in self.topic2sub_topic[t].keys():\n\n print(\"Now training the classsfier for topic: \", t, \" ; intent: \", st)\n print(128 * \"=\")\n print(\"Input: str; Output: boolean(if the str contents the intent: \", st, \" ).\")\n print(64 * \"-\")\n X, y = self.get_data(t, st)\n print(\"data_loaded!\")\n X_train, X_dev, y_train, y_dev = self.my_train_test_split(X, y)\n best_f1 = 0\n for e in range(1,10):\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.InputLayer(input_shape=[1024, ]))\n model.add(tf.keras.layers.Dense(64, activation='relu'))\n model.add(tf.keras.layers.Dense(64, activation='relu'))\n model.add(tf.keras.layers.Dense(1, activation='relu'))\n model.compile(loss='mean_squared_logarithmic_error', optimizer='adam', metrics=[metrics.mae, metrics.categorical_accuracy])\n model.fit(X_train, y_train, epochs=e, batch_size=128)\n print(\"f1_score on dev set: \")\n f1 = self.f1_score_model(model, X_dev, y_dev)[0]\n if f1 > best_f1:\n self.model_zoo[t][st] = model\n model.save_weights(self.trained_w_folder+\"/%s/%s.h5\" %(t,st))\n self.best_epoch[t][st] = e\n self.best_f1[t][st] = f1\n best_f1 = f1\n\n print(64*\"=\")\n print()", "def _train_model(self):\n raise NotImplementedError()", "def train(self, *args, **kwargs):\n raise NotImplementedError", "def end_training(self):\n self.training = False", "def train(entropy_fn):\n del entropy_fn # unused\n return _make_modules(is_train=True)", "def train(self, tr_set_path: str, save_path: str, va_split: float=0.1, stratified_split: bool=False, early_stopping: bool=True):\n df_tr = read_csv_json(tr_set_path)\n if stratified_split:\n df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=va_split, random_state=SEED))\n df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]\n va_messages, va_labels = list(df_va.text), list(df_va.intent)\n va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for i in range(len(df_va))]\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for i in range(len(df_tr))]\n (x_train, y_train, le_encoder) = self.__preprocess(tr_dataset)\n (x_va, y_va, _) = self.__preprocess(va_dataset, le_encoder)\n else:\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for i in range(len(df_tr))]\n (x_train, y_train, le_encoder) = self.__preprocess(tr_dataset)\n\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n session.run(tf.global_variables_initializer())\n model = 
self.__build_model(num_classes=len(le_encoder.categories_[0]))\n model.compile(\n loss=losses.CategoricalCrossentropy(label_smoothing=self.label_smoothing),\n #metrics=['categorical_accuracy'],\n optimizer=self.model_cfg.get('optimizer', 'adam') #default lr at 0.001\n #optimizer=optimizers.Adam(learning_rate=5e-4)\n )\n # early stopping callback using validation loss \n callback = tf.keras.callbacks.EarlyStopping(\n monitor=\"val_loss\",\n min_delta=0,\n patience=5,\n verbose=0,\n mode=\"auto\",\n baseline=None,\n restore_best_weights=True,\n )\n #callback = EarlyStoppingAtMaxMacroF1(\n # patience=100, # record all epochs\n # validation=(x_va, y_va)\n #)\n\n print('start training')\n history = model.fit(x_train, y_train,\n batch_size=self.model_cfg['batch_size'],\n epochs=100,\n validation_split=va_split if not stratified_split else 0,\n validation_data=(x_va, y_va) if stratified_split else None,\n callbacks=[callback] if early_stopping else None)\n history.history['train_data'] = tr_set_path\n print(f'finished training in {len(history.history[\"loss\"])} epochs')\n save(model, le_encoder, save_path, history)\n self.model = model\n self.session = session\n self.graph = graph\n self.le_encoder = le_encoder\n # return training history \n return history.history", "def set_mode(self, mode):\n if mode == 'train':\n self.net.train()\n elif mode == 'eval':\n self.net.eval()\n else:\n raise ValueError(\n \"Got invalid mode '{}'. Valid options are 'train' and 'eval'.\".format(mode))", "def _training_after_hook(self):\n pass", "def train(self, x_train, y_train, x_val, y_val):\n pass", "def train(self, sess): \n\n logging.info(\"////////////////////////////\")\n logging.info(\"///// BEGIN TRAINING /////\")\n logging.info(\"////////////////////////////\")\n\n # for TensorBoard\n summaryWriter = tf.summary.FileWriter(\n \"./checkpoints/\", \n sess.graph)\n\n # Initialize iterator\n sess.run(self.train_iter.initializer)\n\n # Print initial model predictions\n emaTrainLoss = self.get_loss(sess, dSet=\"train\")\n emaTrainAccr = self.get_accuracy(sess, dSet=\"train\")\n valLoss = self.get_loss(sess, dSet=\"val\")\n valAccr = self.get_accuracy(sess, dSet=\"val\")\n logging.info(\"Initial training Loss / Accuracy: %f / %f)\" % (emaTrainLoss, emaTrainAccr))\n logging.info(\"Initial validation Loss / Accuracy: %f / %f)\" % (valLoss, valAccr))\n\n randomRatio = 1.0\n epoch = 0\n best_val_loss = None\n best_val_acc = None\n\n\n ###### Loop over epochs #####\n while (self.FLAGS.Nepochs is 0) or (epoch <= self.FLAGS.Nepochs):\n epoch += 1\n epoch_tic = time.time()\n\n # Evaluate test and validation data\n trnLoss = self.get_loss(sess, dSet=\"train\")\n trnAccr = self.get_accuracy(sess, dSet=\"train\")\n valLoss = self.get_loss(sess, dSet=\"val\")\n valAccr = self.get_accuracy(sess, dSet=\"val\")\n\n print_info = \"Full Sets\\tTraining %.5f / %.5f \\tValidation %.5f / %.5f\" %\\\n (trnLoss, trnAccr, valLoss, valAccr)\n logging.info(\"\\n\\n///// Begin Epoch {} /////\\n\".format(epoch)\n + print_info)\n\n\n # Initialize iterator\n sess.run(self.train_iter.initializer)\n\n ##### Loop over mini batches #####\n while True:\n\n # Perform training step\n try :\n tstep_tic = time.time()\n curLoss, curAccr, global_step = self.run_train_step(sess, summaryWriter)\n tstep_toc = time.time()\n tstep_time = tstep_toc - tstep_tic\n except tf.errors.OutOfRangeError:\n break\n\n # Update training history parameters\n emaTrainLoss = curLoss*(1-self.FLAGS.train_variable_decay)\\\n + emaTrainLoss*self.FLAGS.train_variable_decay \n 
emaTrainAccr = curAccr*(1-self.FLAGS.train_variable_decay)\\\n + emaTrainAccr*self.FLAGS.train_variable_decay \n\n ### Evaluate model ###\n if global_step % self.FLAGS.eval_every == 0:\n\n # Save training data measurements\n self.writeSummary(emaTrainLoss, \"train/loss\", summaryWriter, global_step)\n self.writeSummary(emaTrainAccr, \"train/acc\", summaryWriter, global_step)\n self.history[\"step\"].append(global_step)\n self.history[\"trainLoss\"].append(emaTrainLoss)\n self.history[\"trainAccr\"].append(emaTrainAccr)\n\n # Evaluate validation data\n valLoss = self.get_loss(sess, dSet=\"val\")\n valAccr = self.get_accuracy(sess, dSet=\"val\")\n\n self.writeSummary(valLoss, \"val/loss\", summaryWriter, global_step)\n self.writeSummary(valAccr, \"val/acc\", summaryWriter, global_step)\n self.history[\"validLoss\"].append(valLoss)\n self.history[\"validAccr\"].append(valAccr)\n\n # Logging results\n print_info = \"%i\\tTraining %.5f / %.5f \\tValidation %.5f / %.5f\" %\\\n (global_step, emaTrainLoss, emaTrainAccr, valLoss, valAccr)\n logging.info(print_info)\n\n # plot training progress\n self.plot_results()\n\n\n # Save model\n if global_step % self.FLAGS.save_every == 0:\n logging.info(\"Saving model at iteration {} to {}\".format(\n global_step, self.FLAGS.checkpoint_path))\n self.saver.save(sess, \n self.FLAGS.checkpoint_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n self.saveTrainingHistory(\n fileName=self.FLAGS.checkpoint_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n\n\n # Evaluate validation data\n valLoss = self.get_loss(sess, dSet=\"val\")\n valAccs = self.get_accuracy(sess, dSet=\"val\")\n\n # Save best models\n if (best_val_loss is None) or (valLoss < best_val_loss):\n logging.info(\"Saving best loss model at iteration {} in {}\".format(\n global_step, self.FLAGS.bestModel_loss_ckpt_path))\n best_val_loss = valLoss\n self.bestLossSaver.save(sess, \n self.FLAGS.bestModel_loss_ckpt_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n self.saveTrainingHistory(\n fileName=self.FLAGS.bestModel_loss_ckpt_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n if (best_val_acc is None) or (valAccs > best_val_acc):\n logging.info(\"Saving best accuracy model at iteration {} in {}\".format(\n global_step, self.FLAGS.bestModel_acc_ckpt_path))\n best_val_acc = valAccs\n self.bestAccSaver.save(sess, \n self.FLAGS.bestModel_acc_ckpt_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n self.saveTrainingHistory(\n fileName=self.FLAGS.bestModel_acc_ckpt_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n\n\n loss_train = self.get_loss(sess, dSet=\"train\")\n acc_train = self.get_accuracy(sess, dSet=\"train\")\n\n loss_val = self.get_loss(sess, dSet=\"val\")\n acc_val = self.get_accuracy(sess, dSet=\"val\")\n\n print(loss_train, acc_train)\n if self.FLAGS.verbose:\n print(\"\\n\\n\")\n print(\"###########################\")\n print(\"##### Final Results #####\")\n print(\"###########################\")\n print(\"\\nTraining [ Loss: %f\\t Accuracy: %f]\" \\\n % (loss_train, acc_train))\n print(\"Validation [ Loss: %f\\t Accuracy: %f]\" \\\n % (loss_val, acc_val))\n \n self.hasTrained = True", "def check(self) -> None:\n # validate training config\n super().check()", "def train_all(X_train_fuse, Y_train, X_dev_fuse, Y_dev, R_train, R_dev, hyperparams):", "def training_step(self, **kwargs):\n raise NotImplementedError", "def train(self, mode=True):\n super(ImVoteNet, 
self).train(mode)\n if self.freeze_img_branch:\n if self.with_img_bbox_head:\n self.img_bbox_head.eval()\n if self.with_img_backbone:\n self.img_backbone.eval()\n if self.with_img_neck:\n self.img_neck.eval()\n if self.with_img_rpn:\n self.img_rpn_head.eval()\n if self.with_img_roi_head:\n self.img_roi_head.eval()", "def train_and_test(self, train_fn, test_fn):\n logging.info(\"Training..\")\n self.train(train_fn)\n logging.info(\"Testing..\")\n return self.test(test_fn)\n logging.info(\"Done!\")", "def pre_train(self, dataset, **kwargs):\n\n pass", "def retrain(self):\n thread = Thread(target=self.trainer.train_classifier)\n thread.start()", "def __train__(self):\n if (self.type_camf == 'CAMF_CI'):\n #users, items, context, ratings\n ci = camf_ci.CI_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = ci.fit()\n elif (self.type_camf == 'CAMF_CU'):\n cu = camf_cu.CU_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = cu.fit()\n elif (self.type_camf == 'CAMF_C'):\n c = camf_c.C_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = c.fit()\n\n dummy_pred = np.zeros((predictions.shape))\n for r, pred_array in enumerate(predictions):\n for c, pred in enumerate(pred_array):\n dummy_pred[r][c] = self.__check_ratings__(pred)\n predictions = dummy_pred\n #save a plot with a loss function\n plots = prs.PlotRSData()\n #print(losses)\n plots.plot_loss_cars(losses, self.type_camf, self.__save_prefix__+\"_loop\"+str(self.loop))\n pd.DataFrame(losses).to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ +\"losses_loop\"+str(self.loop)+\".csv\")\n print('Saving the feature matrix...')\n # set predictions back to the pivot table\n self.__utility_saved_training__(predictions) \n # save results\n self.utility_predictions.to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ + \"_SGD_predictions_loop\"+str(self.loop)+\".csv\")", "def _setup_misc(self, mode):\n self.lr_rate_ph = tf.Variable(0.0, name='lrn_rate', trainable=False)\n self.reuse = None if (mode == 'train') else True\n self.batch_size = self.hparams.batch_size\n if mode == 'eval':\n self.batch_size = 25", "def _is_train(self):\n return tf.placeholder(dtype=tf.bool,\n name='is_train')", "def setup_to_transfer_learn(model, base_model):\n for layer in base_model.layers:\n layer.trainable = False\n model.compile(optimizer=Adam(lr=0.001), loss='categorical_crossentropy', metrics=['categorical_accuracy', f1_score])", "def train_impl(\n self,\n train_processed_data_dir: Path,\n val_processed_data_dir: Path,\n ) -> NoReturn:\n pass", "def train(dataset: str, fold: str, img_size: int, is_ovo: bool, net_type: str, epochs: int, is_finetune: bool,\n train_percent: int, learning_rate: int, extra_info=\"\"):\n global init_learning_rate, _OVO_MATRIX_TRANSPOSED\n start = datetime.now()\n # übergebene Parameter auflisten\n print(20 * \"-\" + \"Parameter für das Training\" + 20 * \"-\")\n print(\"Datensatz: %s\" % dataset)\n print(\"Fold: %s\" % fold)\n print(\"Bildgröße: %s\" % img_size)\n print(\"Kodierung: %s\" % (\"OvO\" if is_ovo else \"OvA\"))\n print(\"Netz: %s\" % net_type)\n print(\"Epochen: %s\" % epochs)\n print(\"Gewichte: %s\" % (\"Finetune\" if is_finetune else \"Scratch\"))\n print(\"Prozentsatz des 
Trainingssplits: %s\" % train_percent)\n print(\"Initiale Learning-Rate: %f\" % learning_rate)\n print(66 * \"-\")\n\n # Learning-Rate setzen\n init_learning_rate = learning_rate\n\n # weights setzen (Scratch oder Pretrained mit Imagenet)\n weights = None\n if is_finetune:\n weights = \"imagenet\"\n # Klassenanzahl aus Datensatz-Name ableiten (Zahl am Ende des Datensatz-Namens ist Klassenanzahl)\n last_digits = 0\n for c in dataset[::-1]:\n if c.isdigit():\n last_digits += 1\n else:\n break\n\n num_classes = int(dataset[dataset.__len__() - last_digits:])\n print(\"Anzahl an Klassen: %s\" % num_classes)\n\n # Verschiedene Netz-Varianten\n\n if net_type.lower() in [\"resnet\", \"resnet50\", \"r\"]:\n net_type = \"R\"\n # Erste und letzte Schicht weglassen (include_top=False) und eigene Input-Shape\n model = keras.applications.resnet50.ResNet50(weights=weights, include_top=False,\n input_shape=(img_size, img_size, 3))\n out = model.output\n # vorletzte Schicht wieder herstellen (so wie sie im Original Netz auch wäre)\n out = keras.layers.GlobalAveragePooling2D()(out)\n elif net_type.lower() in [\"inception-pawara\", \"inceptionv3-pawara\", \"ip\"]:\n net_type = \"IP\"\n # Erste und letzte Schicht weglassen (include_top=False) und eigene Input-Shape\n model = keras.applications.inception_v3.InceptionV3(weights=weights, include_top=False,\n input_shape=(img_size, img_size, 3))\n\n # Letzte Schichten ändern wie im Code von Pawara et al.\n x = model.output\n x = keras.layers.BatchNormalization()(x)\n x = keras.layers.Activation('relu')(x)\n x = keras.layers.AveragePooling2D(pool_size=(8, 8))(x)\n x = keras.layers.Dropout(0.4)(x)\n out = keras.layers.Flatten()(x)\n elif net_type.lower() in [\"inception\", \"inceptionv3\", \"i\"]:\n net_type = \"I\"\n\n # Erste und letzte Schicht weglassen (include_top=False) und eigene Input-Shape\n model = keras.applications.inception_v3.InceptionV3(weights=weights, include_top=False,\n input_shape=(img_size, img_size, 3))\n out = model.output\n # vorletzte Schicht wieder herstellen (so wie sie im Original Netz auch wäre)\n out = keras.layers.GlobalAveragePooling2D()(out)\n\n else:\n print(\"Netz %s wird nicht unterstützt\" % net_type)\n exit(11)\n # Verzeichnis um alles zu diesem Modell zu speichern\n current_model_string = dataset + \",\" + str(img_size) + \",\" + (\n \"OvO\" if is_ovo else \"OvA\") + \",\" + net_type + \",\" + (\"F\" if is_finetune else \"S\") + \",\" + str(\n train_percent) + \",\" + str(epochs) + \",\" + str(fold) + \",\" + str(extra_info)\n\n # mehrere Folds zum gleichen Netz zusammenfassen in Unterordner\n current_model_folder_name = extra_info + \",\" + dataset + \",\" + str(img_size) + \",\" + (\n \"OvO\" if is_ovo else \"OvA\") + \",\" + net_type + \",\" + (\"F\" if is_finetune else \"S\") + \",\" + str(\n train_percent) + \",\" + str(epochs)\n save_dir = _WORK_DIR / \"saved_results\" / current_model_folder_name.replace(\",\", \"_\").replace(\".\", \",\") / str(fold)\n save_dir_cp = _WORK_DIR / \"saved_checkpoints\"\n cp_name = str(extra_info) + \",\" + current_model_string + \".cp\"\n\n if save_dir.exists():\n print(\"Der Ordner für die aktuelle Konfiguration existiert bereits!\")\n print(str(save_dir))\n exit(13)\n save_dir.mkdir(parents=True)\n save_dir_cp.mkdir(parents=True, exist_ok=True)\n optimizer = keras.optimizers.Adam(lr=get_learning_rate(0))\n\n # Datensatz laden\n x_train, y_train, x_test, y_test = load_dataset(dataset, fold, train_percent, is_ovo, img_size)\n\n steps_per_epoch = x_train.__len__() // _BATCH_SIZE if 
x_train.__len__() // _BATCH_SIZE > 0 else 1\n\n # Data Augmentation (bis zu 10% shiften vertikal und horizontal, horizontal spiegeln)\n if _DATA_AUGMENTATION:\n data_augmentation = keras.preprocessing.image.ImageDataGenerator(\n featurewise_center=False,\n samplewise_center=False,\n featurewise_std_normalization=False,\n samplewise_std_normalization=False,\n zca_whitening=False,\n rotation_range=0,\n width_shift_range=0.1,\n height_shift_range=0.1,\n horizontal_flip=True,\n vertical_flip=False)\n data_augmentation.fit(x_train)\n\n if is_ovo:\n # Y-Label müssen von Klassennummer (z.B. 5) zu OvO-Vektor kodiert werden\n get_ovo_matrix(num_classes) # speichert OvO-Matrix für passende Klassenanzahl in globale Variable _OVO_MATRIX\n y_train = convert_labels_to_ovo(y_train, num_classes)\n y_test = convert_labels_to_ovo(y_test, num_classes)\n\n output_layer_size = (num_classes * (num_classes - 1)) // 2\n # Modell für OvO vorbereiten (tanh() als letzte Schicht im Netz einfügen)\n output_layer = keras.layers.Dense(output_layer_size, kernel_initializer=\"he_normal\", activation=\"tanh\")(out)\n model = keras.models.Model(inputs=model.inputs, outputs=output_layer)\n model.compile(loss=ovo_crossentropy_loss, optimizer=optimizer,\n metrics=[ovo_crossentropy_loss, ovo_accuracy_metric])\n else: # OvA\n output_layer_size = num_classes\n # Softmax Schicht am Ende des Netzes einfügen für OvA\n output_layer = keras.layers.Dense(output_layer_size, kernel_initializer=\"he_normal\", activation=\"softmax\")(\n out)\n model = keras.models.Model(inputs=model.inputs, outputs=output_layer)\n model.compile(loss='categorical_crossentropy', optimizer=optimizer,\n metrics=['accuracy', \"categorical_crossentropy\"])\n\n checkpoint = keras.callbacks.ModelCheckpoint(filepath=str(save_dir_cp / cp_name), monitor=\"val_loss\",\n verbose=1,\n save_best_only=True)\n callbacks = [checkpoint, keras.callbacks.LearningRateScheduler(get_learning_rate)]\n\n model.summary()\n # Trainiere Netz (mit oder ohne Data-Augmentation)\n if _DATA_AUGMENTATION:\n history = model.fit_generator(data_augmentation.flow(x_train, y_train, batch_size=_BATCH_SIZE),\n validation_data=(x_test, y_test),\n epochs=epochs, shuffle=True, workers=1, verbose=1,\n steps_per_epoch=steps_per_epoch,\n callbacks=callbacks) # TODO workers=4 in Pawara, thread safe warning\n else:\n history = model.fit(x=x_train, y=y_train, batch_size=_BATCH_SIZE,\n validation_data=(x_test, y_test),\n epochs=epochs, shuffle=True, workers=1, verbose=1,\n steps_per_epoch=steps_per_epoch,\n callbacks=callbacks) # TODO workers=4 in Pawara, thread safe warning\n end = datetime.now()\n elapsed = (end - start).total_seconds() / 60 # benötigte Zeit für das Training (und Laden des Datensatzes)\n\n # Speichere die history als pickle-Datei\n with open(save_dir / \"historySave.dat\", 'wb') as pickle_file:\n pickle.dump(history.history, pickle_file)\n\n # Acc und Loss für Test und Train ausrechnen\n acc_test, loss_test = evaluate_model(model, x_test, y_test, is_ovo, save_dir, \"test\")\n acc_train, loss_train = evaluate_model(model, x_train, y_train, is_ovo, save_dir, \"train\")\n # Ergebnis in Logdatei schreiben\n with open(save_dir.parent.parent / \"allModelsLog.txt\", \"a+\") as log_file:\n log_string = \"%s,%.2f,%s,%s,\" % (\n get_gpu_name(), elapsed, _BATCH_SIZE, learning_rate) + current_model_string + \",\" + str(\n loss_train) + \",\" + str(acc_train) + \",\" + str(loss_test) + \",\" + str(acc_test)\n log_file.write(log_string + \"\\n\")\n print(log_string)\n print(\"Finale Accuracy (Train): 
\" + str(acc_train))\n print(\"Finaler Loss (Train): \" + str(loss_train))\n print(\"Finale Accuracy (Test): \" + str(acc_test))\n print(\"Finaler Loss (Test): \" + str(loss_test))", "def training_target(training_data):\n return training_data.status == \"DEFAULT\"" ]
[ "0.75731736", "0.75731736", "0.7269636", "0.7236012", "0.7080342", "0.70689183", "0.69955844", "0.6983821", "0.6983821", "0.6983821", "0.6983821", "0.6983821", "0.69157493", "0.69107115", "0.68860173", "0.68713254", "0.6867261", "0.68614346", "0.6843705", "0.68286896", "0.6795557", "0.6753618", "0.6686101", "0.6676698", "0.6650902", "0.66411924", "0.6640461", "0.6623677", "0.6603852", "0.6598303", "0.6597998", "0.65604603", "0.65555704", "0.65473", "0.6542546", "0.6529013", "0.6529013", "0.6506921", "0.64948994", "0.64492047", "0.6444408", "0.6440427", "0.6390093", "0.6360288", "0.6359268", "0.63570464", "0.6329145", "0.63191247", "0.63072413", "0.6305049", "0.62704843", "0.62684405", "0.6262903", "0.62549657", "0.62549657", "0.62549657", "0.62549657", "0.62549657", "0.62162066", "0.621516", "0.621516", "0.621516", "0.621516", "0.6193908", "0.61908245", "0.6188755", "0.6188446", "0.6151702", "0.61505646", "0.6140156", "0.6132129", "0.6120538", "0.6105765", "0.6088032", "0.6082772", "0.6073119", "0.6063658", "0.60602707", "0.6057566", "0.60516894", "0.6040037", "0.6029751", "0.602708", "0.60176206", "0.6015985", "0.60126114", "0.60120034", "0.60071415", "0.6005983", "0.6004223", "0.6000243", "0.5996492", "0.5986992", "0.5975351", "0.59730417", "0.59645104", "0.59619737", "0.5959681", "0.5958188", "0.5957206" ]
0.64429736
41
A subfunction that ensures the model is in eval mode.
def _set_eval(self):
    if self.model.__dict__['training']:
        self.model.eval()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _should_eval(self):\n return False", "def eval(self):\n pass", "def eval(self):\n pass", "def eval(self):\n pass", "def test(self):\n self.eval()", "def eval(*args, **kwargs):\n\n pass", "def eval(*args, **kwargs)->Any:\n pass", "def eval(self):\n raise NotImplementedError", "def eval(self, *args, **kwargs):\n raise NotImplementedError", "def eval(self):\n raise NotImplementedError('Must define eval function to use this base class')", "def setEvaluationMode(self, newMode):\n \n pass", "def eval(self):\n raise NotImplemented()", "def set_eval(self):\n self.eval()\n self.volatile = True", "def set_eval(self):\n self.eval()\n self.volatile = True\n self.scheduled_sampling = False", "def eval_mode(self):\n return self._eval_mode", "def c_test_eval_inp(self, population, run_locals):\r\n return 1", "def eval(self, A):\n\t\tpass", "def _evalAndDer(self, x):\n raise NotImplementedError()", "def test_set_eval_mode(self, mock_eval, mock_call):\n mock_mgr = MagicMock()\n mock_mgr.attach_mock(mock_eval, 'eval')\n mock_mgr.attach_mock(mock_call, 'call')\n\n evaluator = Evaluator(batch_size=64)\n with patch('seq2seq.evaluator.evaluator.torch.stack', return_value=None), \\\n patch('seq2seq.loss.NLLLoss.eval_batch', return_value=None):\n evaluator.evaluate(self.seq2seq, self.dataset)\n\n num_batches = int(math.ceil(len(self.dataset) / evaluator.batch_size))\n expected_calls = [call.eval()] + num_batches * [call.call(ANY)]\n self.assertEquals(expected_calls, mock_mgr.mock_calls)", "def provoke_and_handle_SyntaxError():\n try:\n eval(\"x === x\")\n except SyntaxError as se:\n print(f\"Sorry! You can't use eval in that way: {se}\")", "def _evalContext(self):\n def xor(*args):\n return sum(args) == 1\n def neg(result):\n return not result\n context = {\n 'xor': xor,\n 'neg': neg\n }\n return context", "def set_eval(self):\n self.model.eval()", "def eval(self, expr, locals):\r\n sav = self.locals_ptr\r\n self.locals_ptr = locals\r\n x = eval(self.compile(expr), {\"__builtins__\":self.eval_allowed_globals}, locals)\r\n self.locals_ptr = sav\r\n return x", "def infer(self):\n self.eval()", "def eval(self):\n self.mode = \"eval\"\n self.online_net.eval()", "def eval(self):\n self.mode = \"eval\"\n self.online_net.eval()", "def set_eval(self, eval: bool):\n self.brain.set_eval(eval)", "def validate_Eval(result, _dummy_expression):\n return result is not None", "def _eval_subs(self, old, new):\n return None", "def eval(expr):\n global simulator\n\n if simulator is None:\n print \"program is not running\"\n return\n return simulator.eval (expr)", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def betterEvaluationFunction(currentGameState):\r\n \"*** YOUR CODE HERE ***\"\r\n util.raiseNotDefined()", "def mctsEvalFunction(state):\n return 1 if state.isWin() else 0", "def eval(self, x, asscalar: bool = False):\n raise NotImplementedError", "def evaluate(self):\n pass", "def evaluate(self):\n pass", "def eval(cls, *args):\n raise 
NotImplementedError(\"subclasses need to override this method\")", "def eval(self):\n return self.with_transforms(\"eval\")", "def exec_builtin(self, cmd):\r\n func = Builtin.builtins.get(cmd[0])\r\n if func is None:\r\n return False\r\n func(self, cmd)\r\n return True", "def evaluate():\n click.echo(\"Not implemented yet. In the future, this command will be used for evaluation.\")\n sys.exit(-2)", "def runeval(self, expr, globals=None, locals=None):\n if globals is None:\n import __main__\n globals = __main__.__dict__\n if locals is None:\n locals = globals\n self.reset()\n sys.settrace(self.trace_dispatch)\n try:\n return eval(expr, globals, locals)\n except BdbQuit:\n pass\n finally:\n self.quitting = True\n sys.settrace(None)", "def evaluate(self) :\n pass", "def to_execute(self):\r\n return True", "def _make_callable(func):\n try:\n return func.evaluator()\n except AttributeError:\n return func", "def _validate_builtin(_):\n pass", "def test_lazy_evaluation(self):\n pass", "def evaluate(self):\r\n raise Exception(\"Not implemented.\")", "def validate(self, mode): # pragma: no cover\n pass", "def eval_step(self, *args, **kwargs):\n raise NotImplementedError", "def test_no_requirements(self):\n def f():\n pass\n self._run_as_operator(f)", "def eval(self):\n self.train(mode=False)", "def fix_evaluator(self):\n with self.override_evaluator(self._get_evaluators()):\n yield", "def evaluator_side_effect(_, __, math_string):\r\n if math_string != '4':\r\n raise err", "def usesEvaluationManager(self):\n \n pass", "def with_continual_eval(self):\n return self._with_continual_eval", "def evaluator(self, candidates, args):\n\t\traise NotImplementedError", "def fail_unless_eval(xmlfile, eval_string, msg=None):\n env = feedparser.parse(xmlfile)\n if not eval(eval_string, globals(), env):\n failure = msg or f\"not eval({eval_string}) \\nWITH env({pprint.pformat(env)})\"\n raise AssertionError(failure)\n failure = f\"not everything is unicode \\nWITH env({pprint.pformat(env)})\"\n assert everything_is_unicode(env), failure", "def test_RestrictingNodeTransformer__visit_LtE__1():\n assert restricted_eval('1 <= 3') is True", "def evaluate(self, edict):\n pass", "def evalOnEnd(self):\n\n return None", "def test_RestrictingNodeTransformer__visit_GtE__1():\n assert restricted_eval('1 >= 3') is False", "def _check_mode_valid(mode):\n if (mode != model_fn.ModeKeys.TRAIN and mode != model_fn.ModeKeys.INFER and\n mode != model_fn.ModeKeys.EVAL):\n raise ValueError(\"mode=%s unrecognized.\" % str(mode))", "def hook_local_eval(bridge_conn, eval_expr, eval_globals, eval_locals):\n\n # first, bind the eval function to the arguments\n prepped_function = functools.partial(eval, eval_expr, eval_globals, eval_locals)\n\n return call_execute_sync_and_get_result(prepped_function)", "def eval_test(eval_fn, group1, group2, verbose = 0):\n # Only allow known-safe eval_fn's\n if eval_fn in [ 'my_classifier' ]:\n return evaluate(globals()[eval_fn], group1, group2, verbose)\n else:\n raise Exception(\"Error: Tester tried to use an invalid evaluation function: '%s'\" % eval_fn)", "def eval(self, exp: Experiment):\n res = self.execute(exp.iseq)\n exp.result = res", "def eval(self, node):\n\n return None", "def eval_test(eval_fn, group1, group2, verbose = 0):\n # Only allow known-safe eval_fn's\n if eval_fn in [ 'my_classifier' ]:\n return evaluate(globals()[eval_fn], group1, group2, verbose)\n else:\n raise Exception, \"Error: Tester tried to use an invalid evaluation function: '%s'\" % eval_fn", "def eval_test(eval_fn, group1, 
group2, verbose = 0):\n # Only allow known-safe eval_fn's\n if eval_fn in [ 'my_classifier' ]:\n return evaluate(globals()[eval_fn], group1, group2, verbose)\n else:\n raise Exception, \"Error: Tester tried to use an invalid evaluation function: '%s'\" % eval_fn", "def evaluator(self, candidates, args):\r\n raise NotImplementedError", "def ev(expr):\n return eval(expr,user_ns())", "def _assert_valid_mode(mode:str):\n if not mode in [_TRAIN, _EVAL, _PREDICT]:\n raise ValueError(\"Invalid mode.\")", "def evaluate(self):\n raise Exception(\"Not implemented.\")", "def _safe_eval(expr, functions_and_constants={}, check_compiling_input=True):\n\n # Some safety checks\n assert len(expr) < 1024\n\n # Check for potential bad compiler input\n if check_compiling_input:\n check_for_pow(expr)\n\n # Compile Python source code to Python code for eval()\n code = compile(expr, '', 'eval')\n\n # Dissect bytecode back to Python opcodes\n ops = disassemble(code)\n assert len(ops) < 1024\n\n stack = []\n for op in ops:\n value = op.touch_value(stack, functions_and_constants)\n\n return value", "def test_dynamic_variable_generation_surprising():\n a = Step('a')\n b= Step(a, 1, 2)\n res = do_eval(b, a=\"adios\", adios=op.add)\n assert res(1, 2) == 3", "def startEvaluationMode(self):\n self.saved_dat_ref = self.data_ref", "def eval_loop():\n while(True):\n decision = raw_input(\"enter some mathematical operations\")\n if(decision == \"done\"):\n break\n print eval(decision)", "def evaluate():\n\t\t\t\tif not hasattr(evaluate, 'value'):\n\t\t\t\t\tevaluate.value = func()\n\t\t\t\treturn evaluate.value", "def test_repr_ef(self):\n self.assertEqual(self.ns, eval(f\"{self.ns!r}\"))", "def eval(self) -> typing.Any:\n return self.expr()", "def eval_function(function_string):\n for key,value in globals().items():\n if key == function_string and type(value) == types.FunctionType:\n value()\n return\n \n log.warn(\"Unrecognized option: \"+function_string.rsplit(\"_\",1)[0])", "def set_eval(self):\n for m in self.models.values():\n m.eval()", "def on_eval_begin(self, logs=None):", "def eval(self, i, node, fn):\r\n if self.pre_func is not None:\r\n self.pre_func(i, node, fn)\r\n fn()\r\n if self.post_func is not None:\r\n self.post_func(i, node, fn)", "def set_models_eval(self):\n raise NotImplementedError", "def __call__(value):", "def _run_evaluator(self, func, stats):\n host_stats = stats['host_stats']\n host_caps = stats['host_caps']\n extra_specs = stats['extra_specs']\n share_stats = stats['share_stats']\n\n result = evaluator.evaluate(\n func,\n extra=extra_specs,\n stats=host_stats,\n capabilities=host_caps,\n share=share_stats)\n\n return result", "def _check_evaluate_implementation(self) -> None:\n logging.debug(f\"Evaluate_batch_defined: {self._evaluate_batch_defined()}.\")\n logging.debug(f\"Evaluate full dataset defined: {self._evaluate_full_dataset_defined()}.\")\n check.not_eq(\n self._evaluate_batch_defined(),\n self._evaluate_full_dataset_defined(),\n \"Please define exactly one of: `evaluate_batch()` or `evaluate_full_dataset()`. 
\"\n \"For most use cases `evaluate_batch()` is recommended is recommended because \"\n \"it can be parallelized across all devices.\",\n )", "def test_RestrictingNodeTransformer__visit_Eq__1():\n assert restricted_eval('1 == int(\"1\")') is True", "def eval_assignment(exp, env):\n set_variable_value(assignment_variable(exp), m_eval(assignment_value(exp), env), env)\n return quote(\"ok\")", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def exec_function(self, args):\n raise NotImplementedError()" ]
[ "0.75202614", "0.6831532", "0.6831532", "0.6831532", "0.65361977", "0.64932793", "0.64039826", "0.6293743", "0.6179289", "0.60888994", "0.6082469", "0.6079987", "0.6057103", "0.598441", "0.5893379", "0.58848435", "0.586661", "0.5791171", "0.57718706", "0.57667834", "0.575287", "0.5723873", "0.57190835", "0.56765044", "0.56722575", "0.56722575", "0.5629901", "0.56145495", "0.5614391", "0.5602988", "0.5580148", "0.5580148", "0.5580148", "0.5580148", "0.5580148", "0.5580148", "0.5563239", "0.55573577", "0.55519265", "0.54906917", "0.54906917", "0.54822487", "0.54523104", "0.54396856", "0.54330075", "0.541724", "0.5384984", "0.53823054", "0.53675", "0.5359361", "0.53489745", "0.5347631", "0.5339251", "0.5323694", "0.5313367", "0.53099364", "0.5297461", "0.52856517", "0.5282071", "0.5281333", "0.5270216", "0.5270164", "0.5264524", "0.5251656", "0.524662", "0.52313286", "0.5229159", "0.52180666", "0.52122104", "0.5201587", "0.51989794", "0.518153", "0.518153", "0.51764596", "0.5167599", "0.5160477", "0.51591855", "0.5154665", "0.51536757", "0.51446974", "0.513569", "0.5128822", "0.51251507", "0.5111814", "0.5103981", "0.51015556", "0.50980526", "0.50976235", "0.50948304", "0.50866646", "0.50756925", "0.5069713", "0.50584406", "0.5051466", "0.50474566", "0.50474566", "0.50474566", "0.50474566", "0.50474566", "0.5047412" ]
0.5924793
14
The train function that executes a standard training flow per epoch.
def _batch_iter(self, source, target, i: int):
    # send data to device
    source = source.to(self.device)
    target = target.to(self.device)

    # the result and loss
    result = self.model(source)
    loss = self.criterion(result, target)

    # optimization and backward
    self.optimizer.zero_grad()
    loss.backward()
    self.optimizer.step()

    # update the loss
    self.epoch_loss.update(loss.item(), source.size(0))

    # print the information
    if self.info:
        print(f"\rEpoch: { self.epoch } | Batch: { i } | loss: { self.epoch_loss.avg }", end="")

    # clean the data
    del source, target

    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self, training_steps=10):", "def train():\n pass", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def train_epoch(self) -> None:\n ct = self.config.training\n total_games = self._get_total_games()\n print(f\"Total Games: {total_games}\")\n train_size = int(0.9 * total_games)\n dataset_wrapper = DatasetWrapper(self.config)\n self.agent.model.fit(\n dataset_wrapper.get_dataset(train_size),\n epochs=ct.epoch_to_checkpoint,\n validation_data=dataset_wrapper.get_dataset(train_size, is_training=False),\n )", "def train(self, batch):\n pass", "def train(self, batch_training=False):\n raise NotImplementedError", "def _train(self):\n step = 0\n for epoch in range(self.opts.num_epochs):\n self.hub.publish(Topic.EPOCH, epoch)\n for i, data in enumerate(self.loader):\n # Compute loss ...\n # NOTE(ycho): if one of the callbacks require training loss,\n # e.g. for logging, simply register a hook to the loss module\n # rather than trying to extract them here.\n loss = self.loss_fn(self.model, data)\n self.hub.publish(Topic.TRAIN_LOSS, loss)\n\n # Backprop + Optimize ...\n self.optim.zero_grad()\n loss[\"total\"].backward()\n self.optim.step()\n\n # Emit `step` event.\n # == logging, saving, evaluation\n self.hub.publish(Topic.STEP, step)\n step += 1\n\n if step >= self.opts.train_steps:\n return", "def _train(trainer, train_data, batcher_fn, total_batch_steps = 5, seed = 1):\n for i in range(total_batch_steps):\n torch.manual_seed(seed)\n set_seed(seed)\n data, targets = batcher_fn(train_data, i*35)\n trainer.train_step(data, targets)", "def train_one_epoch(self):\n raise NotImplementedError", "def train_epoch(self):\n for batch, targets in self.training_dataloader:\n self.training_step(batch, targets)\n self.calculate_training_loss()\n self.epochs_trained += 1\n LOGGER.info(\n \"Training loss after {} epochs: {}\".format(str(self.epochs_trained), str(self.training_average_loss))\n )", "def train(self, epochs):\n print('Starting training...')\n print('\\n{:13} '\n '{:>17} '\n '{:^38}'\n ''.format('', '--- Training ---', '--- Validation ---'))\n print('{:4} {:>8} '\n '{:>8} {:>8} '\n '{:>8} {:>8} {:>8} {:>8}'\n ''.format('', '', 'Loss', 'Acc', 'Loss', 'Prc', 'Rec', 'Acc'))\n training_time = 0\n for epoch in range(1, epochs + 1):\n start_time = time.time()\n trn_stats = self.__progress(self.training, self.__train_fn)\n val_stats = self.__progress(self.validation, self.__val_fn)\n elapsed_time = time.time() - start_time\n training_time += elapsed_time\n print('{:>4} {:>7.2f}s '\n '{:>8.3f} {:>8.1%} '\n '{:>8.3f} {:>8.1%} {:>8.1%} {:>8.1%}'\n ''.format(epoch, elapsed_time,\n trn_stats[0], trn_stats[-1],\n *val_stats))\n self.history.append([epoch] + list(trn_stats) + list(val_stats))\n self.report['epochs'] = epochs\n self.report['time_per_epoch'] = training_time / epochs", "def train(self):\r\n\r\n for cur_epoch in range(self.model.cur_epoch_tensor.eval(self.sess), self.config.num_epochs + 1, 1):\r\n self.train_epoch(cur_epoch)\r\n self.model.global_step_assign_op.eval(session=self.sess, feed_dict={\r\n self.model.global_step_input: self.model.global_step_tensor.eval(self.sess) + 1})", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n 
self.save_model()", "def train(self, training_data):\n pass", "def train(self, num_batches: int):", "def train(self, train_data, train_labels, train_input_fn, n_epochs):\n raise NotImplementedError(\"Method must be implemented by subclass\")", "def train_epoch(self, train=False):\n # init params\n config = self.config\n writer = self.writer\n train_params = self.get_train_params()\n args = self.args\n # net, net_SP = self.net, self.net_SP\n optimizer, optimizer_SP = self.optimizer, self.optimizer_SP\n\n lr = self.get_learning_rate()\n logging.info(f\"current learning rate: {lr}\")\n\n running_losses = []\n self.save_lists = [\n \"err_q\",\n \"err_t\",\n \"epi_dists\",\n \"relative_poses_cam\",\n \"relative_poses_body\",\n ]\n dict_of_lists_in_train = init_dict_of_lists(config, self.save_lists)\n dict_of_lists_in_val = init_dict_of_lists(config, self.save_lists)\n if_val_in_train_trigger = False\n\n thd_corr = 300\n writer.add_scalar(\"training-lr\", lr, self.n_iter)\n\n # Train one epoch\n for i, sample_train in tqdm(enumerate(self.train_loader)):\n # if training\n if train:\n # eval in training script\n if (\n self.n_iter != 0\n and self.n_iter % config[\"training\"][\"val_interval_in_train\"] == 0\n ):\n if_val_in_train_trigger = True\n if if_val_in_train_trigger:\n logging.info(\n \"+++[Train]+++ Collecting training batch for %s at train step %d\"\n % (args.exper_name, self.n_iter)\n )\n self.net.eval()\n else:\n self.net.train()\n\n # train one batch\n (\n loss_train_out,\n dict_of_lists_in_train,\n clamp_cum,\n ) = self.train_val_batch(\n train_params,\n sample_train,\n True,\n if_val=if_val_in_train_trigger,\n dict_of_lists=dict_of_lists_in_train,\n )\n\n if if_val_in_train_trigger:\n if (\n dict_of_lists_in_train[\"count\"]\n > config[\"training\"][\"val_batches\"]\n ):\n dict_of_lists_in_train = self.flush_dict_of_lists(\n writer, \"training\", self.n_iter, **dict_of_lists_in_train\n )\n if_val_in_train_trigger = False\n else:\n # running_losses.append(loss_train_out)\n print(self.n_iter, \"%.8f\" % loss_train_out)\n self.n_iter += 1\n\n # if testing\n if args.eval and self.n_iter % config[\"training\"][\"val_interval\"] == 0:\n logging.info(\n \"+++[Val]+++ Validating %s at train step %d\"\n % (args.exper_name, self.n_iter)\n )\n self.net.eval()\n assert self.net.training == False\n for j, sample_val in tqdm(enumerate(self.val_loader)):\n # if not self.check_num_of_matches(sample, thd=thd_corr): continue\n logging.info(\"+++[Val]+++ Validating batch %d\" % (j))\n # logging.info(f\"frame_id: {sample_val['frame_ids']}\")\n loss_val_out, dict_of_lists_in_val, _ = self.train_val_batch(\n train_params, sample_val,\n False, if_val=True, dict_of_lists=dict_of_lists_in_val,\n ) ##### check: in order to align val and training\n self.n_iter_val += 1\n if config[\"training\"][\"val_batches\"] != -1 and (\n j > config[\"training\"][\"val_batches\"]\n ): ##### check: how to limit the validation\n break\n print(dict_of_lists_in_val.keys())\n\n ## save valdiation result (dict)\n if len(config[\"exps\"][\"filename\"]) > 3:\n # print(f\"dict_of_lists_in_val: {dict_of_lists_in_val}\")\n def get_dict(key_layer1, key_layer2, dict_of_lists):\n dict_of_array = {}\n for k in key_layer1:\n dict_of_array[k] = np.stack(dict_of_lists[k][key_layer2])\n return dict_of_array\n\n our_name, base_name = (\n config[\"exps\"][\"our_name\"],\n config[\"exps\"][\"base_name\"],\n )\n\n print(f'save dict_of_lists_in_val to {config[\"exps\"][\"filename\"]}')\n # save our results\n dict_of_lists = get_dict(\n 
self.save_lists, our_name, dict_of_lists_in_val\n )\n dict_of_lists[\"epi_dists\"] = dict_of_lists[\"epi_dists\"][:, :10] ### only take part of it\n np.savez(\n f'{self.save_path[:-11]}/{our_name}_{config[\"exps\"][\"filename\"]}',\n **dict_of_lists,\n )\n # save base_name\n dict_of_lists = get_dict(\n self.save_lists, base_name, dict_of_lists_in_val\n )\n dict_of_lists[\"epi_dists\"] = dict_of_lists[\"epi_dists\"][:, :10] ### only take part of it\n np.savez(\n f'{self.save_path[:-11]}/{base_name}_{config[\"exps\"][\"filename\"]}',\n **dict_of_lists,\n )\n # output then flush\n dict_of_lists_in_val = self.flush_dict_of_lists(\n writer, \"validating\", self.n_iter, **dict_of_lists_in_val\n )\n\n # epoch_loss = np.mean(np.asarray(running_losses))\n\n # training iterations\n self.epoch += 1\n if self.n_iter > config[\"training\"][\"train_iter\"]:\n break\n return 0.0, self.clamp_cum, self.n_iter, self.n_iter_val", "def trainer(model,\n optimizer,\n dataset,\n count_of_epoch=5,\n batch_size=64,\n callback=None,\n progress=None):\n iterations = range(count_of_epoch)\n\n if progress is not None:\n iterations = progress(iterations)\n\n for it in iterations:\n\n batch_generator = DataLoader(\n dataset=dataset,\n batch_size=batch_size,\n shuffle=True,\n pin_memory=True)\n\n train_epoch(\n \tmodel=model,\n train_generator=batch_generator,\n optimizer=optimizer,\n callback=callback)\n\n return", "def train_epoch(self):\r\n for loader in self.loaders:\r\n if self.epoch % loader.epoch_interval == 0:\r\n self.cycle_dataset(loader)\r\n\r\n self._stats_new_epoch()\r\n self._write_tensorboard()\r\n print('{}th epoch train / eval done!'.format(self.epoch))", "def Train(self):\n self.init_epoch = self.epoch\n if self.epoch >= self.params.num_epoch:\n WARNING('Num_epoch should be smaller than current epoch. Skip training......\\n')\n else:\n for _ in range(self.epoch, self.params.num_epoch):\n self.epoch += 1\n print('-' * 20 + 'Epoch.' 
+ str(self.epoch) + '-' * 20)\n\n # train one epoch\n self.train_one_epoch()\n\n # should display\n if self.epoch % self.params.display == 0:\n print('\\tTrain loss: %.4f' % self.train_loss[-1])\n\n # should save\n if self.params.should_save:\n if self.epoch % self.params.save_every == 0:\n self.save_checkpoint()\n\n # test every params.test_every epoch\n if self.params.should_val:\n if self.epoch % self.params.val_every == 0:\n self.val_one_epoch()\n print('\\tVal loss: %.4f' % self.val_loss[-1])\n\n # adjust learning rate\n self.adjust_lr()\n self.train_one_epoch_Image_display() \n \n # save the last network state\n if self.params.should_save:\n self.save_checkpoint()\n\n # train visualization\n self.plot_curve()", "def train_step(self):\n pass", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.num_epochs):\n print(\"EPOHA\")\n self.run_epoch()\n print(\"SAVE MODEL\")\n self.save_model()", "def train_epoch(self, data_loader):\n raise NotImplementedError", "def train():\n # YOUR TRAINING CODE GOES HERE", "def train(self, trainData):\n pass", "def train(self, iterations=1):\n for _ in range(iterations):\n self.trainer.train()\n self.test_network()", "def train(self):\r\n \r\n args = self.args\r\n model = self.model\r\n dataset = self.dataset\r\n train_state = self.train_state\r\n optimizer = self.optimizer\r\n scheduler = self.scheduler\r\n train_bar = self.train_bar\r\n val_bar = self.val_bar\r\n epoch_bar = self.epoch_bar\r\n \r\n for epoch_index in range(args.num_epochs):\r\n train_state['epoch_index'] = epoch_index\r\n \r\n # Iterate over training dataset\r\n \r\n running_loss,running_acc = self.train_loop(epoch_index, args, \r\n model, dataset, \r\n optimizer, train_bar)\r\n \r\n train_state['train_loss'].append(running_loss)\r\n train_state['train_acc'].append(running_acc)\r\n \r\n running_loss,running_acc = self.val_loop(epoch_index, args, model, \r\n dataset, optimizer, val_bar)\r\n \r\n \r\n train_state['val_loss'].append(running_loss)\r\n train_state['val_acc'].append(running_acc)\r\n \r\n print(\"Epoch \"+str(epoch_index+1)+\": Running loss=\"+ \\\r\n str(running_loss)+\", Running Acc=\"+str(running_acc))\r\n \r\n train_state = update_train_state(args=args, model=model, \r\n train_state=train_state)\r\n \r\n scheduler.step(train_state['val_loss'][-1])\r\n \r\n if train_state['stop_early']:\r\n break\r\n \r\n train_bar.n = 0\r\n val_bar.n = 0\r\n epoch_bar.set_postfix(best_val=train_state['early_stopping_best_val'] )\r\n epoch_bar.update()\r\n \r\n state_dict = torch.load(train_state['model_filename'])\r\n model.load_state_dict(state_dict)\r\n return model", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train_epoch(self):\n\n if self._train_data_set is not None and self._train_data_set is not None:\n self._model.fit_num_epochs(self._train_data_set, self._test_data_set)\n else:\n raise RuntimeError(\"[Triggerbot]: No training or test set available\")", "def train_epoch(model, train_dataloader, optimizer, loss_fn):\n model.train()\n total_training_loss = 0\n for batch_index, batch in enumerate(train_dataloader):\n batch = batch[0].view(-1,1,28,28).float()\n output_batch = model(batch)\n loss = loss_fn(batch, output_batch, model.prev_means, model.prev_vars)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n total_training_loss += loss", "def train_and_evaluate(model, train_dataloader, test_dataloader, 
optimizer, scheduler, loss_fn, total_epochs):\n\n for epoch in range(total_epochs):\n\n # Run one epoch for both train and test\n print(\"Epoch {}/{}\".format(epoch + 1, total_epochs))\n\n # compute number of batches in one epoch(one full pass over the training set)\n train(model, optimizer, loss_fn, train_dataloader, epoch)\n \n scheduler.step()\n\n # Evaluate for one epoch on test set\n eval(model, loss_fn, test_dataloader, epoch)", "def train_one_epoch(self):\n\t\tself.model.train()\n\t\ttrain_loss = 0\n\n\t\tfor batch_idx, data in enumerate(self.data_loader.train_loader):\n\t\t\tInput = data[0].float().to(self.device)\n\t\t\tOutput = data[1].float().to(self.device)\n\n\t\t\tself.optimizer.zero_grad()\n\t\t\tloss = self.loss(self.model(Input)[:,0],Output)\n\t\t\ttrain_loss += loss.item()\n\t\t\tloss.backward()\n\t\t\tself.optimizer.step()\n\t\t\tself.current_iteration += 1\n\n\t\tself.summary_writer.add_scalar('training/loss', loss.item(), self.current_epoch)", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self, current_hyper_params):\n train_loss = 0\n train_n_iter = 0\n # Set model to train mode\n self.model.train()\n # Iterate over train data\n print(\"Iterating over training data...\")\n for i, batch in enumerate(tqdm(self.train_loader)):\n loss = self._train_batch(batch)\n # Statistics\n train_loss += loss.item()\n train_n_iter += 1\n self.stats.train_loss_history.append(train_loss / train_n_iter)", "def step(self, epoch):\n\n self.train(epoch)\n self.test(epoch)", "def run_epoch(self):\n self.model_lr_scheduler.step()\n\n print(\"Training\")\n self.set_train()\n\n for batch_idx, inputs in enumerate(self.train_loader):\n\n before_op_time = time.time()\n\n outputs, losses = self.process_batch(inputs)\n\n self.model_optimizer.zero_grad()\n losses[\"loss\"].backward()\n self.model_optimizer.step()\n\n duration = time.time() - before_op_time\n\n # log less frequently after the first 2000 steps to save time & disk space\n early_phase = self.step < 2000\n late_phase = self.step % 2000 == 0\n\n if early_phase or late_phase:\n self.log_time(batch_idx, duration, losses[\"loss\"].cpu().data)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n self.log(\"train\", inputs, outputs, losses)\n self.val()\n\n self.step += 1", "def train(model, trainloader, device, optimizer, loss_function, epoch):\n global train_losses\n model.train()\n train_iter = 0\n loss_meter = AverageMeter(\"train-avg\")\n for x, _ in trainloader:\n x = x.to(device)\n z, logdet, _, logp = model(preprocess(x))\n loss = loss_function(logp, logdet, x.size())\n\n # code for rosalinty model\n # log_p_sum, logdet, z_outs = model(preprocess(x))\n # loss = loss_function(log_p_sum, logdet, x.size())\n\n if(train_iter % 10 == 0):\n print(f\"iteration: {train_iter}, loss: {loss.item()}\", end=\"\\r\")\n \n model.zero_grad()\n loss_meter.update(loss.item())\n loss.backward()\n optimizer.step()\n train_iter += 1\n print(f\"epoch complete, mean loss: {loss_meter.avg}\")\n train_losses.append({\"epoch\": epoch, \"avg_loss\": loss_meter.avg})", "def train(self,\n epochs=10,\n track_every=20):\n self.model.train()\n print(\"Model put in training mode.\")\n\n for i in range(epochs):\n stop_training = False\n batch_losses = []\n for j, sample in enumerate(self.training_set):\n\n # Run single loop.\n loss = self.partial_fit(sample)\n batch_losses.append(loss)\n self.print_progress(epoch=i,\n batch=j,\n 
loss=loss)\n\n if j % track_every == 0 and j != 0:\n batch_loss = numpy.mean(numpy.array(batch_losses))\n val_loss, metric = self.update_validation_result(epoch=i,\n batch=j,\n loss=batch_loss)\n\n stop_training = self.estopper.check_stop_training(val_loss)\n\n if stop_training:\n break\n\n # End batch iteration.\n\n val_loss, metric = self.update_validation_result(epoch=i,\n batch=j,\n loss=batch_loss)\n\n if stop_training:\n print(\"Early stopping.\")\n torch.save(self.model, self.save_dir + \"model.pt\")\n print(f\"Model saved to {self.save_dir}model.pt\")\n break\n\n # End training loop.", "def train_epoch(self):\n for it in range(self.iter_per_epoch):\n # Get batch\n xs, _ = self.mnist.train.next_batch(100)\n _, loss, summary = self.sess.run([self.train_op, self.loss, self.summary_op],\n {self.x: xs})\n self.summary_writer.add_summary(summary, it)\n if it % 1000 == 0:\n print('Iteration {}\\t loss: {}'.format(it, loss))", "def _train_epoch(self, model, tqdm_data,\n optimizer_disc=None, optimizer_gen=None):", "def train(self):\n\t\tprint(\"Training...\")\n\t\tprev_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\tfor i in range(self.max_iter):\n\t\t\t# gradient descent\n\t\t\tdw0, dw = self.compute_grad(self.w0, self.w, 'train')\n\t\t\tself.w0 -= self.step_size * dw0\n\t\t\tself.w = [wj-self.step_size*dwj for wj, dwj in zip(self.w, dw)]\n\t\t\tcurr_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\t\tif i%(self.max_iter/10)==0:\n\t\t\t\tprint('iteration: {}, loss: {}'.format(i, curr_loss))\n\t\t\tif abs(curr_loss-prev_loss) < self.tolerance:\n\t\t\t\tprint('# of iterations:',i)\n\t\t\t\tbreak\n\t\tself.trained = True\n\t\tprint('Mean log loss of TRAIN data:', curr_loss)", "def train(self):\n # Change directory to the code directory\n current_working_directory = os.getcwd()\n\n os.chdir(self.model_parameters[\"NN_code_directory\"])\n\n self.call_training_routine()\n\n # Come back to the original directory\n os.chdir(current_working_directory)", "def forward_train(self, *args, **kwargs):\n raise NotImplementedError('This interface should not be used in current training schedule. 
Please use `train_step` for training.')", "def on_train_forward(self, runner):\n self.on_iter_forward(runner)", "def train_an_epoch(self, train_loader, epoch_id):\n assert hasattr(self, \"model\"), \"Please specify the exact model !\"\n self.model.train()\n total_loss = 0.0\n\n for batch_data in train_loader:\n loss = self.train_single_batch(batch_data)\n total_loss += loss\n print(\"[Training Epoch {}], Loss {}\".format(epoch_id, loss))\n self.writer.add_scalar(\"model/loss\", total_loss, epoch_id)", "def train(self):\n p = self._params\n if self.train_data != None:\n tens_to_log = self.params.tens_to_log\n logging_hook = tf.train.LoggingTensorHook(tensors = tens_to_log,\n every_n_iter = p.logging_step,\n )\n t_fn = tf.estimator.inputs.numpy_input_fn(x = {\"x\": self.train_data[\"x\"]},\n y = self.train_data[\"y\"],\n batch_size = p.batch_size,\n num_epochs = None,\n shuffle = True,\n )\n self._model.train(input_fn = t_fn,\n steps = self.params.training_steps,\n hooks = [logging_hook],\n )\n \n if self.eval_data != None:\n e_fn = tf.estimator.inputs.numpy_input_fn(x = {\"x\": self.eval_data[\"x\"]},\n y = self.eval_data[\"y\"],\n num_epochs = 1,\n shuffle = False,\n )\n eval_results = self.model.evaluate(input_fn = e_fn,\n checkpoint_path = self.model_dir,\n )\n print(eval_results)", "def __call__(self, initial_lr, step, epoch):\n\n pass", "def training_phase(self):\r\n self.train_dataloader = self.get_dataloader(\r\n hdf_path=self.train_h5_path,\r\n data_description=\"training set\"\r\n )\r\n self.valid_dataloader = self.get_dataloader(\r\n hdf_path=self.valid_h5_path,\r\n data_description=\"validation set\"\r\n )\r\n\r\n self.get_ts_properties()\r\n\r\n self.initialize_output_files()\r\n\r\n start_epoch, end_epoch = self.define_model_and_optimizer()\r\n\r\n print(\"* Beginning training.\", flush=True)\r\n n_processed_batches = 0\r\n for epoch in range(start_epoch, end_epoch):\r\n\r\n self.current_epoch = epoch\r\n n_processed_batches = self.train_epoch(n_processed_batches=n_processed_batches)\r\n\r\n # evaluate model every `sample_every` epochs (not every epoch)\r\n if epoch % self.C.sample_every == 0:\r\n self.evaluate_model()\r\n else:\r\n util.write_model_status(score=\"NA\") # score not computed\r\n\r\n self.print_time_elapsed()", "def train(self, *args, **kwargs):\n raise NotImplementedError", "def train(self):\n self.training = True", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def train(self, epoch=50):\n # self.history = self.model.fit(self.train_images,\n # self.train_labels,\n # epochs=epoch,\n # validation_data=(self.test_images, self.test_labels))\n datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1,\n horizontal_flip=True)\n # prepare iterator\n it_train = datagen.flow(self.train_images, self.train_labels, batch_size=64)\n # fit model\n steps = int(self.train_images.shape[0] / 64)\n self.history = self.model.fit_generator(it_train, steps_per_epoch=steps,\n epochs=epoch,\n validation_data=(self.test_images,\n self.test_labels),\n verbose=1)\n # evaluate model\n _, acc = self.model.evaluate(self.test_images, self.test_labels, verbose=0)\n LOGGER.info('> %.3f' % (acc * 100.0))\n self.summarize_diagnostics()", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def train_epoch(data_loader, model, optimizer, criterion, device, fold, epoch):\n\tmodel.train()\n\tfor inputs, input_lens, labels in tqdm.tqdm(data_loader, ncols=100, desc=f\"train-- F: {fold} -- E: 
{epoch}\"):\n\t\tinputs = inputs.to(device)\n\t\tlabels = labels.to(device)\n\t\t#input_lens = input_lens.to(device)\n\n\t\toptimizer.zero_grad()\n\t\tpreds = model(inputs, input_lens)\n\t\t\n\t\tloss = criterion(preds, labels.unsqueeze(1))\n\t\tloss.backward()\n\t\toptimizer.step()", "def _train_epoch(self, epoch):\n self.model.train()\n self.train_metrics.reset()\n for batch_idx, (data, target) in enumerate(self.data_loader):\n data, target = data.to(self.device), target.to(self.device)\n\n self.optimizer.zero_grad()\n\n if self.config[\"amp\"]:\n # AMP!\n with autocast():\n output = self.model(data)\n loss = self.criterion(output, target)\n else:\n output = self.model(data)\n loss = self.criterion(output, target)\n\n loss.backward()\n self.optimizer.step()\n\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n self.train_metrics.update(\"loss\", loss.item())\n for met in self.metric_ftns:\n self.train_metrics.update(met.__name__, met(output, target))\n\n if batch_idx % self.log_step == 0:\n self.logger.debug(\n \"Train Epoch: {} {} Loss: {:.6f}\".format(\n epoch, self._progress(batch_idx), loss.item()\n )\n )\n self.writer.add_image(\n \"input\", make_grid(data.cpu(), nrow=8, normalize=True)\n )\n\n if batch_idx == self.len_epoch:\n break\n log = self.train_metrics.result()\n\n if self.do_validation:\n val_log = self._valid_epoch(epoch)\n log.update(**{\"val_\" + k: v for k, v in val_log.items()})\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n return log", "def train(self, ):\n raise NotImplementedError", "def train(self):\n self.dataGenerator.printDataStatistics()\n sE = len(self.dataGenerator.ids[\"train\"])// 32\n sV = len(self.dataGenerator.ids[\"validation\"])// 32\n self.model.fit_generator(\n generator=self.dataGenerator.trainingGenerator,\n steps_per_epoch= sE,\n epochs=2,\n validation_data=self.dataGenerator.validationGenerator,\n validation_steps=sV,\n # use_multiprocessing=True,\n # workers=2,\n )", "def train(self):\n raise NotImplementedError", "def train_network(self):\n if self.trainData:\n if self.verbose:\n print('Started training...')\n\n for epoch in range(135):\n pass\n # save the model\n else:\n if self.verbose:\n print('No train data available')", "def train(self):\n for epoch in range(self.current_epoch, self.config.optim.epochs):\n self.current_epoch = epoch\n self.train_one_epoch()\n if epoch % self.config.optim.val_freq == 0:\n self.validate()\n if self.config.optim.auto_schedule:\n self.scheduler.step(self.current_val_loss)\n self.save_checkpoint()", "def train(self, X_t_, W_previous_, pf_value_previous_, dailyReturn_t_):\n self.sess.run(self.train_op, feed_dict={self.X_t: X_t_,\n self.W_previous: W_previous_,\n self.pf_value_previous: pf_value_previous_,\n self.dailyReturn_t: dailyReturn_t_})", "def train(self, training_data, cfg, **kwargs):\n pass", "def train(self, input_fn, steps):\n self._estimator.train(input_fn=input_fn, max_steps=steps)", "def train_epoch(self, epoch):\n device_mapper = self.device_mapper\n self.model.train()\n self.train_metrics.reset()\n for batch_idx, data in enumerate(self.data_loader):\n data = device_mapper.map_modules(data, non_blocking=True)\n\n self.optimizer.zero_grad()\n output = self.model(data)\n loss = self.loss(output, data)\n loss.backward()\n self.optimizer.step()\n\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n self.train_metrics.update('loss', loss.item(), batch_size=output.size(0))\n for met in self.metrics:\n self.train_metrics.update(met.name(), met(output, 
data), batch_size=output.size(0))\n\n if batch_idx % self.log_step == 0:\n self.logger.debug('Train Epoch: %d %s Loss: %.6f', epoch, self._progress(batch_idx), loss.item())\n\n if batch_idx == self.len_epoch:\n break\n log = self.train_metrics.result()\n\n if self.do_validation:\n val_log = self.valid_epoch(epoch)\n log.update(**{'val_' + k: v for k, v in val_log.items()})\n\n if self.lr_scheduler is not None:\n if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):\n self.lr_scheduler.step(log[\"val_roc_auc\"])\n else:\n self.lr_scheduler.step()\n return log", "def TrainOneStep(self):\n pass", "def train(self, train_fn, dev_fn):\n X_train, Y_train = self.load_dataset(train_fn)\n X_dev, Y_dev = self.load_dataset(dev_fn)\n logging.debug(\"Classes: {}\".format((self.num_of_classes(), self.classes_())))\n # Set model params, called here after labels have been identified in load dataset\n self.model_fn()\n\n # Create a callback to print a sample after each epoch\n logging.debug(\"Training model on {}\".format(train_fn))\n self.model.fit(X_train, Y_train,\n batch_size = self.batch_size,\n epochs = self.epochs,\n validation_data = (X_dev, Y_dev),\n callbacks = self.get_callbacks(X_train))", "def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()", "def train(self, x_train, y_train, x_val, y_val):\n pass", "def train(self, train, dev):\r\n best_score = 0\r\n nepoch_no_imprv = 0 # for early stopping\r\n self.add_summary() # tensorboard\r\n lost_list = []\r\n acc_list = []\r\n for epoch in range(self.config.nepochs):\r\n self.logger.info(\"Epoch {:} out of {:}\".format(epoch + 1,\r\n self.config.nepochs))\r\n\r\n score, loss_hist, acc = self.run_epoch(train, dev, epoch)\r\n self.config.lr *= self.config.lr_decay # decay learning rate\r\n lost_list.extend(loss_hist)\r\n acc_list.append(acc)\r\n # early stopping and saving best parameters\r\n if score >= best_score:\r\n nepoch_no_imprv = 0\r\n self.save_session()\r\n best_score = score\r\n self.logger.info(\"- new best score!\")\r\n\r\n else:\r\n nepoch_no_imprv += 1\r\n if nepoch_no_imprv >= self.config.nepoch_no_imprv:\r\n self.logger.info(\"- early stopping {} epochs without \"\\\r\n \"improvement\".format(nepoch_no_imprv))\r\n\r\n break\r\n return lost_list, acc_list", "def wrapper_train(tree_depth, demos, validation_demos, pred_data=[None,None], verbose=True):\n return train(program_gen_step_size = 1000, \n num_programs = NUM_PROGRAMS, \n num_dts = 5, \n max_num_particles = 25, \n input_demos = demos, \n further_demos = validation_demos, \n tree_depth = tree_depth, \n return_prior=True,\n pred_data=pred_data,\n verbose=verbose)", "def epoch_train(tools, **kwargs):\n sess = tools.sess\n optimizer = tools.optimizer\n\n feed_dict = kwargs.get(\"feed_dict\", {})\n\n infos, summary, e, _ = sess.run(tools.infos, feed_dict=feed_dict)\n if config.VERBOSE_EACH:\n if not int(e) % config.VERBOSE_EACH:\n print(config.INFOMESSAGE(infos))\n sys.stdout.flush()\n else:\n print(config.INFOMESSAGE(infos))\n sys.stdout.flush()\n\n tools.reporter(summary, e)\n\n try:\n if not feed_dict:\n while True:\n sess.run(optimizer)\n else:\n while True:\n sess.run(optimizer, feed_dict=feed_dict)\n except tf.errors.OutOfRangeError:\n pass\n return infos", "def train(self, training, epochs, group):\n for epoch in range(epochs):\n self.input_matrix={}\n self.back_propagation_learning(training)\n acc = accuracy(self, group)\n print(\"Accuracy on epoch {} is {} \".format(epoch, acc))", "def train(train_dataset: 
torch.utils.data.Dataset, test_dataset: torch.utils.data.Dataset,\n training_config: dict = train_config, global_config: dict = global_config):\n\n for path in global_config.values():\n create_dirs(path)\n\n # wrap datasets with Dataloader classes\n train_loader = torch.utils.data.DataLoader(train_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n test_loader = torch.utils.data.DataLoader(test_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n\n # model name & paths\n name = \"_\".join([train_config[\"DATE\"], train_config[\"SESSION_NAME\"]])\n modelpath = os.path.join(global_config[\"WEIGHT_DIR\"], name)\n\n # instantiate model\n model = training_config[\"MODEL\"](**training_config[\"MODEL_CONFIG\"])\n\n optimizer = training_config[\"OPTIMIZER\"](model.parameters(),\n **training_config[\"OPTIMIZER_CONFIG\"])\n\n # set up ignite engine\n training_config[\"METRICS\"].update({\"loss\" : Loss(training_config[\"LOSS\"])})\n trainer = create_supervised_trainer(model=model, optimizer=optimizer,\n loss_fn=training_config[\"LOSS\"],\n device=training_config[\"DEVICE\"])\n evaluator = create_supervised_evaluator(model,\n metrics=training_config[\"METRICS\"],\n device=training_config[\"DEVICE\"])\n\n\n # tensorboardX setup\n log_dir = os.path.join(global_config[\"LOG_DIR\"], \"tensorboardx\", name)\n create_dirs(log_dir)\n writer = SummaryWriter(logdir=log_dir)\n\n # log using the logging tool\n logger = log.Log(training_config, run_name=train_config['SESSION_NAME'])\n\n @trainer.on(Events.ITERATION_COMPLETED)\n def log_training(engine):\n iteration = (engine.state.iteration - 1) % len(train_loader) + 1\n writer.add_scalar(\"training/loss\", engine.state.output, engine.state.iteration)\n if iteration % 4 == 0:\n print(\"\\repoch[{}] iteration[{}/{}] loss: {:.2f} \".format(engine.state.epoch,\n iteration, len(train_loader),\n engine.state.output), end=\"\")\n\n # generic evaluation function\n def evaluate(engine, loader):\n evaluator.run(loader)\n metrics = evaluator.state.metrics\n return metrics\n\n # training data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_training_results(engine):\n print(\"\\ntraining results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, train_loader)\n print(metrics)\n for key, value in metrics.items():\n logger.log_metric(key, value)\n writer.add_scalar(\"training/avg_{}\".format(key), value, engine.state.epoch)\n\n # test data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(engine):\n print(\"test results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, test_loader)\n print(metrics)\n for key, value in metrics.items():\n writer.add_scalar(\"validation/avg_{}\".format(key), value, engine.state.epoch)\n\n # model checkpointing\n @trainer.on(Events.EPOCH_COMPLETED)\n def model_checkpoint(engine):\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Checkpoint saved to {}\".format(modelpath + \".pth\"))\n\n # training iteration\n try:\n trainer.run(train_loader, max_epochs=training_config[\"EPOCHS\"])\n except KeyboardInterrupt:\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Model saved to {}\".format(modelpath + \".pth\"))\n raise KeyboardInterrupt\n\n # write weights\n torch.save(model.state_dict(), modelpath + \".pth\")\n\n # write csv log file\n log_content = training_config.copy()\n evaluator.run(test_loader)\n log_content[\"VAL_METRICS\"] = evaluator.state.metrics\n log_path = os.path.join(global_config[\"LOG_DIR\"], training_config[\"LOGFILE\"])\n 
write_log(log_path, log_content)\n\n logger.end_run()\n \n return evaluator.state.metrics[\"training/avg_loss\"]", "def run_step(self):\n self.hooked_sess.run(self.train_op)", "def train_func(self, data):\n self.net.train()\n\n outputs, losses = self.forward(data)\n\n self.update_network(losses)\n self.record_losses(losses, 'train')\n\n return outputs, losses", "def train(self, train_loader):\n pass", "def forward_train(self, *args, **kwargs):\n pass", "def train_one_epoch(sess, tr_model, i_epoch, run_metadata):\n tr_loss, i = 0, 0\n stime = time.time()\n while True:\n try:\n if NNET_PARAM.time_line:\n _, loss, current_batchsize = sess.run(\n [tr_model.train_op, tr_model.loss, tr_model.batch_size],\n options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),\n run_metadata=run_metadata)\n else:\n _, loss, current_batchsize = sess.run(\n [tr_model.train_op, tr_model.loss, tr_model.batch_size])\n tr_loss += loss\n if (i+1) % NNET_PARAM.minibatch_size == 0:\n if NNET_PARAM.time_line and NNET_PARAM.timeline_type == 'minibatch':\n tl = timeline.Timeline(run_metadata.step_stats)\n ctf = tl.generate_chrome_trace_format()\n with open('_timeline/%03dtimeline%04d.json' % (i_epoch, i+1), 'w') as f:\n f.write(ctf)\n lr = sess.run(tr_model.lr)\n costtime = time.time()-stime\n stime = time.time()\n print(\"MINIBATCH %05d: TRAIN AVG.LOSS %04.6f, \"\n \"(learning rate %02.6f)\" % (\n i + 1, tr_loss / (i*NNET_PARAM.batch_size+current_batchsize), lr), 'cost time: %06dS' % costtime)\n sys.stdout.flush()\n i += 1\n except tf.errors.OutOfRangeError:\n break\n tr_loss /= ((i-1)*NNET_PARAM.batch_size+current_batchsize)\n return tr_loss", "def train(\n self, training_data: TrainingData, cfg: DazuConfig, **kwargs: Any\n ) -> None:", "def on_train_epoch_start(self):\n for callback in self.callbacks:\n callback.on_train_epoch_start(self, self.get_model())", "def train_models(self):\n\n #keep track on the number of iterations (needed to scale lambda)\n nr_iteration = 0\n \n for epoch in range(self.epochs):\n start = time.time()\n print()\n print(epoch + 1)\n print()\n for step, batch in enumerate(self.training_data):\n X_batch = normalize_images(tf.cast(batch[0], 'float32'))\n Y_batch = batch[1]\n Z_batch = self.ae_model.encode(X_batch)\n \n self.train_step_disc(Z_batch, Y_batch)\n # Call only one tf.function when tracing.\n #ADD LAMBDA SCHEDULE ACCORDING TO OUR EXPERIMENTS AND EPOCH LENGTH\n self.scale_lambda(self.lambda_e, nr_iteration)\n self.train_step_ae(X_batch, Y_batch, Z_batch)\n\n nr_iteration += 1\n end = time.time()\n print(\"Epoch \" + str(epoch + 1) + \" takes \" + str(end - start))", "def training_step(self, **kwargs):\n raise NotImplementedError", "def training_network(self, session, epochs, batch_size, get_batches_fn,\\\n\t\t\t\t\t\t train_op, cross_entropy_loss, image_input, correct_label,\\\n\t\t\t\t\t\t keep_prob, learning_rate, saver):\n\n\t #\n\t\tfor epoch in range(epochs):\n\t\t\t#\n\t\t\ts_time = time.time()\n\t\t\t#\n\t\t\tfor image, targets in get_batches_fn(batch_size):\n\t\t\t\t#\n\t\t\t\t_, loss = session.run( [train_op, cross_entropy_loss],feed_dict = \\\n\t\t\t\t\t\t\t\t\t {image_input: image, correct_label: targets,\\\n\t\t\t\t\t\t\t\t\t keep_prob: 0.8 , learning_rate: 0.0001 })\n\t\t\t# Print data on the learning process\n\t\t\tprint(\"Epoch: {}\".format(epoch + 1), \"/ {}\".format(epochs), \" Loss: {:.3f}\".format(loss))", "def train(self, sess):\n assert self.mode == tf.contrib.learn.ModeKeys.TRAIN\n return sess.run([self.update,\n self.train_loss,\n self.global_step,\n 
self.train_summary])", "def start_training(self):\n i = 0\n for _ in range(self.train_steps):\n print(f\"Start Training Step {i + 1}\")\n self.model.learn(total_timesteps=self.total_time_steps)\n self.model.save(self.save_path)\n print(f\"Finished Training Step {i + 1}\")\n i += 1", "def _train_epoch(self, epoch):\n self.model.train()\n self.train_metrics.reset()\n for batch_idx, (data, target_seg, target_class) in enumerate(self.data_loader):\n data, target_seg, target_class = data.to(self.device), target_seg.to(self.device), target_class.to(self.device)\n\n self.optimizer.zero_grad()\n output_seg, output_class = self.model(data)\n loss = self.criterion((output_seg, output_class), target_seg, target_class, epoch)\n loss.backward()\n self.optimizer.step()\n\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n self.train_metrics.update('loss', loss.item())\n for met in self.metric_ftns:\n if met.__name__ == \"accuracy\":\n self.train_metrics.update(met.__name__, met(output_class, target_class))\n else:\n self.train_metrics.update(met.__name__, met(output_seg, target_seg))\n\n if batch_idx % self.log_step == 0:\n self.logger.debug('Train Epoch: {} {} Loss: {:.6f}'.format(\n epoch,\n self._progress(batch_idx),\n loss.item()))\n\n self._visualize_input(data.cpu())\n\n if batch_idx == self.len_epoch:\n break\n log = self.train_metrics.result()\n\n if self.do_validation:\n val_log = self._valid_epoch(epoch)\n log.update(**{'val_'+k : v for k, v in val_log.items()})\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n\n return log", "def TrainEpoch(ss):\n ss.StopNow = False\n curEpc = ss.TrainEnv.Epoch.Cur\n while True:\n ss.TrainTrial()\n if ss.StopNow or ss.TrainEnv.Epoch.Cur != curEpc:\n break\n ss.Stopped()", "def train_step(self, x_train, y_train):\n\n input_x_op = self.session.graph.get_operation_by_name(\"input_x\").outputs[0]\n input_y_op = self.session.graph.get_operation_by_name(\"input_y\").outputs[0]\n dropout_keep_prob_op = self.session.graph.get_operation_by_name(\"dropout_keep_prob\").outputs[0]\n global_step_op = self.session.graph.get_operation_by_name(\"global_step\").outputs[0]\n\n optimizer_op = self.session.graph.get_operation_by_name(\"loss/optimizer\").outputs[0]\n loss_op = self.session.graph.get_operation_by_name(\"loss/loss\").outputs[0]\n\n d_ = {\n input_x_op: x_train,\n input_y_op: y_train\n }\n\n self.init_dataset(d_)\n\n train_batches_per_epoch = (len(x_train) - 1) // self.FLAGS.batch_size + 1\n\n sum_loss = 0\n for current_step in range (train_batches_per_epoch):\n\n if self.FLAGS.summary:\n _, step, summaries, loss = self.session.run(\n [optimizer_op, global_step_op, self.train_summary_op, loss_op], feed_dict={dropout_keep_prob_op: self.hyperparams['dropout_keep_prob']})\n \n self.train_summary_writer.add_summary(summaries, step)\n else:\n _, step, loss = self.session.run(\n [optimizer_op, global_step_op, loss_op], feed_dict={dropout_keep_prob_op: self.hyperparams['dropout_keep_prob']})\n \n sum_loss += loss\n\n time_str = datetime.datetime.now().isoformat()\n if (current_step + 1) % 10 == 0:\n print(\"{}: step {}/{}, loss {:g}\".format(time_str, current_step + 1, train_batches_per_epoch, loss))\n\n mean_loss = sum_loss/ train_batches_per_epoch\n\n return mean_loss", "def run_epoch(self, train, dev, epoch):\n # iterate over dataset\n for i, (words, labels) in enumerate(minibatches(train, self.config.batch_size)):\n fd, _ = self.get_feed_dict(words, labels, self.config.lr,\n self.config.dropout)\n\n _, train_loss= self.sess.run(\n 
[self.train_op, self.loss], feed_dict=fd)\n\n# =============================================================================\n# # tensorboard\n# if i % 10 == 0:\n# self.file_writer.add_summary(summary, epoch*nbatches + i)\n# =============================================================================\n\n metrics = self.run_evaluate(dev)\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\n for k, v in metrics.items()])\n print(msg)\n\n return metrics[\"f1\"]", "def trainNet():", "def train(self, session, *args, train_data_iterator=None,\n dev_data_iterator=None, **kwargs):\n\n raise NotImplementedError(\"Implement train() method\")", "def train_model_epoch(self, epoch_idx, tuning=False):\n acc_loss = 0\n\n num_batch = self.model.config.kg_meta.tot_train_triples // self.config.batch_size if not self.debug else 10\n \n metrics_names = ['acc_loss', 'loss'] \n progress_bar = tf.keras.utils.Progbar(num_batch, stateful_metrics=metrics_names)\n\n for batch_idx in range(num_batch):\n data = list(next(self.generator))\n \n if self.training_strategy == \"projection_based\":\n h = tf.convert_to_tensor(data[0], dtype=tf.int32)\n r = tf.convert_to_tensor(data[1], dtype=tf.int32)\n t = tf.convert_to_tensor(data[2], dtype=tf.int32)\n hr_t = data[3] # tf.convert_to_tensor(data[3], dtype=tf.float32)\n rt_h = data[4] # tf.convert_to_tensor(data[4], dtype=tf.float32)\n loss = self.train_step_projection(h, r, t, hr_t, rt_h)\n elif self.training_strategy == \"pointwise_based\":\n h = tf.convert_to_tensor(data[0], dtype=tf.int32)\n r = tf.convert_to_tensor(data[1], dtype=tf.int32)\n t = tf.convert_to_tensor(data[2], dtype=tf.int32)\n y = tf.convert_to_tensor(data[3], dtype=tf.float32)\n loss = self.train_step_pointwise(h, r, t, y)\n else:\n ph = tf.convert_to_tensor(data[0], dtype=tf.int32)\n pr = tf.convert_to_tensor(data[1], dtype=tf.int32)\n pt = tf.convert_to_tensor(data[2], dtype=tf.int32)\n nh = tf.convert_to_tensor(data[3], dtype=tf.int32)\n nr = tf.convert_to_tensor(data[4], dtype=tf.int32)\n nt = tf.convert_to_tensor(data[5], dtype=tf.int32)\n loss = self.train_step(ph, pr, pt, nh, nr, nt)\n\n acc_loss += loss\n\n if not tuning:\n progress_bar.add(1, values=[('acc_loss', acc_loss), ('loss', loss)])\n\n self.training_results.append([epoch_idx, acc_loss.numpy()])\n\n return acc_loss", "def train(self):\n start_time = time()\n self.model.train()\n\n for step, sample in enumerate(self.train_loader):\n self.optimizer.zero_grad()\n\n x, _, _ = sample\n x = x.to(self.device)\n\n y_pred = self.model.forward(x)\n loss = nn.MSELoss()(y_pred, x)\n loss.backward()\n self.train_losses.append(loss.item())\n\n self.optimizer.step(None)\n\n # print an incredible progress bar\n print(f'\\r{self.progress_bar} │ Loss: {np.mean(self.train_losses):.6f}', end='')\n self.progress_bar.inc()\n\n # log average loss of this epoch\n mean_epoch_loss = np.mean(self.train_losses)\n self.sw.add_scalar(tag='train_loss', scalar_value=mean_epoch_loss, global_step=self.epoch)\n self.train_losses = []\n\n # log epoch duration\n print(f' │ T: {time() - start_time:.2f} s')" ]
[ "0.8151526", "0.7828467", "0.7768698", "0.7768698", "0.7768698", "0.7768698", "0.7735363", "0.76838636", "0.7644439", "0.76343524", "0.7605618", "0.76051074", "0.7574267", "0.7563756", "0.7548235", "0.7517042", "0.7506631", "0.7495705", "0.7478745", "0.7474352", "0.74734735", "0.7470554", "0.7449237", "0.7437643", "0.74211955", "0.7420581", "0.740238", "0.739899", "0.73681587", "0.73404276", "0.73382944", "0.73382944", "0.7294746", "0.72925466", "0.7288471", "0.72877926", "0.7284455", "0.7284455", "0.7284455", "0.7284455", "0.7284455", "0.7279511", "0.72586805", "0.7256762", "0.7243424", "0.7231177", "0.72264004", "0.7217786", "0.7197311", "0.71888", "0.7177611", "0.7174215", "0.7172929", "0.7168296", "0.71522534", "0.7147634", "0.71376806", "0.71360284", "0.7135896", "0.7131012", "0.7126323", "0.7121327", "0.7120474", "0.7111019", "0.71041226", "0.71019113", "0.70964515", "0.7090185", "0.7077481", "0.7072131", "0.7066682", "0.70615923", "0.7061097", "0.705968", "0.7051325", "0.7043026", "0.7035774", "0.7026826", "0.70245343", "0.70242774", "0.70173717", "0.701292", "0.70098627", "0.7003758", "0.70026684", "0.7000846", "0.69983673", "0.69918734", "0.6989626", "0.69811577", "0.6978893", "0.69750583", "0.6971543", "0.6970433", "0.69699955", "0.69632614", "0.695875", "0.6956403", "0.6954818", "0.6954518", "0.6948979" ]
0.0
-1
The train iterator that executes a standard training flow per batch.
def _train_batch(self):
    # start epoch
    for i, (source, target) in enumerate(self.train_dataset):
        result = self._batch_iter(source, target, i)

        # yield
        yield result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_train_iterator(self) -> Iterable[Batch]:\n if self._train_name not in self._datasets:\n raise ValueError(\"Training data not provided.\")\n return self.get_iterator(self._train_name)", "def train_batch_iter(self, batch_size, num_epochs):\n return self.batch_iter(0, batch_size, num_epochs)", "def get_train_iterator(self) -> tf.contrib.data.Iterator:\n return self.train.make_initializable_iterator()", "def train(self, batch_training=False):\n raise NotImplementedError", "def run_train_iter(self, session, batch, summary_writer):\n # Match up our input data with the placeholders\n input_feed = {}\n input_feed[self.context_ids] = batch.context_ids\n input_feed[self.context_mask] = batch.context_mask\n input_feed[self.qn_ids] = batch.qn_ids\n input_feed[self.qn_mask] = batch.qn_mask\n input_feed[self.ans_ids] = batch.ans_ids\n input_feed[self.ans_mask] = batch.ans_mask\n input_feed[self.keep_prob] = 1.0 - self.FLAGS.dropout # apply dropout\n\n # if not use raw graph tokens\n if not self.FLAGS.use_raw_graph:\n input_feed[self.context_embedding] = batch.context_embeddings\n\n # output_feed contains the things we want to fetch.\n output_feed = [self.updates, self.summaries, self.loss, self.global_step, self.param_norm, self.gradient_norm, self.dev_loss]\n\n # Run the model\n [_, summaries, loss, global_step, param_norm, gradient_norm, dev_loss] = session.run(output_feed, input_feed)\n\n # All summaries in the graph are added to Tensorboard\n summary_writer.add_summary(summaries, global_step)\n\n return loss, global_step, param_norm, gradient_norm, dev_loss", "def train(self, batch):\n pass", "def train(self, train_iter_fct, train_steps):\n logger.info('Start training...')\n\n # step = self.optim._step + 1\n step = self.optim._step + 1\n true_batchs = []\n accum = 0\n normalization = 0\n train_iter = train_iter_fct()\n\n total_stats = Statistics()\n report_stats = Statistics()\n self._start_report_manager(start_time=total_stats.start_time)\n\n while step <= train_steps:\n\n reduce_counter = 0\n for i, batch in enumerate(train_iter):\n if self.n_gpu == 0 or (i % self.n_gpu == self.gpu_rank):\n\n true_batchs.append(batch)\n normalization += batch.batch_size\n accum += 1\n if accum == self.grad_accum_count:\n reduce_counter += 1\n if self.n_gpu > 1:\n normalization = sum(distributed\n .all_gather_list\n (normalization))\n\n self._gradient_accumulation(\n true_batchs, normalization, total_stats,\n report_stats)\n report_stats = self._maybe_report_training(\n step, train_steps,\n self.optim.learning_rate,\n report_stats)\n\n true_batchs = []\n accum = 0\n normalization = 0\n if step % self.save_checkpoint_steps == 0 and self.gpu_rank == 0:\n self._save(step)\n\n step += 1\n if step > train_steps:\n break\n train_iter = train_iter_fct()\n\n return total_stats", "def train(self, num_batches: int):", "def train_epoch(self):\n for it in range(self.iter_per_epoch):\n # Get batch\n xs, _ = self.mnist.train.next_batch(100)\n _, loss, summary = self.sess.run([self.train_op, self.loss, self.summary_op],\n {self.x: xs})\n self.summary_writer.add_summary(summary, it)\n if it % 1000 == 0:\n print('Iteration {}\\t loss: {}'.format(it, loss))", "def train_step(self, iterator_map):\r\n\r\n def step_fn(inputs):\r\n losses = self.multi_task.joint_train_step(\r\n inputs,\r\n multi_task_model=self.multi_task_model,\r\n optimizer=self.optimizer,\r\n task_metrics=self.training_metrics)\r\n for key, loss in losses.items():\r\n self.training_losses[key].update_state(loss)\r\n\r\n self.strategy.run(\r\n step_fn, 
args=(tf.nest.map_structure(next, iterator_map),))\r\n self.global_step.assign_add(1)", "def train_batch(self, data, num_iteration, verbose=False):\n self.train(data, num_iteration, random_order=False, verbose=verbose)", "def _train(trainer, train_data, batcher_fn, total_batch_steps = 5, seed = 1):\n for i in range(total_batch_steps):\n torch.manual_seed(seed)\n set_seed(seed)\n data, targets = batcher_fn(train_data, i*35)\n trainer.train_step(data, targets)", "def iter_batch(self):\n\n # model initialization\n self._set_train()\n\n if not self.batch_process:\n self.batch_process = self._train_batch()\n return self.batch_process.__next__()\n else:\n try:\n return self.batch_process.__next__()\n except StopIteration:\n # update the state if StopIteration\n if self.info:\n print(f\"\\rEpoch: { self.epoch } | Average loss: { self.epoch_loss.avg }\")\n\n # update epoch and reset the epoch_loss\n self.epoch_loss.reset()\n self.epoch += 1\n\n # reset the batch process\n del self.batch_process\n self.batch_process = self._train_batch()\n return self.batch_process.__next__()", "def _train(self):\n step = 0\n for epoch in range(self.opts.num_epochs):\n self.hub.publish(Topic.EPOCH, epoch)\n for i, data in enumerate(self.loader):\n # Compute loss ...\n # NOTE(ycho): if one of the callbacks require training loss,\n # e.g. for logging, simply register a hook to the loss module\n # rather than trying to extract them here.\n loss = self.loss_fn(self.model, data)\n self.hub.publish(Topic.TRAIN_LOSS, loss)\n\n # Backprop + Optimize ...\n self.optim.zero_grad()\n loss[\"total\"].backward()\n self.optim.step()\n\n # Emit `step` event.\n # == logging, saving, evaluation\n self.hub.publish(Topic.STEP, step)\n step += 1\n\n if step >= self.opts.train_steps:\n return", "def iter_epoch(self):\n\n # set to train mode\n self._set_train()\n\n # start epoch\n for i, (source, target) in enumerate(self.train_dataset):\n self._batch_iter(source, target, i)\n\n if self.info:\n print(f\"\\rEpoch: { self.epoch } | Average loss: { self.epoch_loss.avg }\")\n\n # update epoch and reset the epoch_loss\n self.epoch_loss.reset()\n self.epoch += 1", "def on_train_forward(self, runner):\n self.on_iter_forward(runner)", "def train(self, session, *args, train_data_iterator=None,\n dev_data_iterator=None, **kwargs):\n\n raise NotImplementedError(\"Implement train() method\")", "def train_minibatches(self):\n batch_size = self.params['batch_size']\n start_index = 0\n while start_index + batch_size < 500:\n end_index = start_index + batch_size\n yield self.input[start_index:end_index], self.y[start_index:end_index]\n start_index = end_index", "def train(self, training_steps=10):", "def train(self):\r\n\r\n for cur_epoch in range(self.model.cur_epoch_tensor.eval(self.sess), self.config.num_epochs + 1, 1):\r\n self.train_epoch(cur_epoch)\r\n self.model.global_step_assign_op.eval(session=self.sess, feed_dict={\r\n self.model.global_step_input: self.model.global_step_tensor.eval(self.sess) + 1})", "def train(self, data_iterator):\n \n if self.config['sequence_input']:\n if self.config['net_input_add_onehot']:\n input_data_ph = tf.placeholder(tf.uint8, shape=(self.config['batch_size'], self.config['timesteps']))\n else:\n input_data_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['timesteps'], self.config['num_input']))\n else:\n input_data_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['num_input']))\n \n if self.config['sequence_output']:\n if 
self.config['net_target_add_onehot']:\n target_ph = tf.placeholder(tf.uint8, shape=(self.config['batch_size'], self.config['timesteps']))\n else:\n target_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['timesteps'], self.config['num_output']))\n else:\n target_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['num_output']))\n \n training, loss_avg_t = self.setup_train(input_data_ph, target_ph)\n \n session = tf.Session()\n session.run(tf.global_variables_initializer())\n \n self.analyze_config()\n \n for epoch in range(self.config['epochs']):\n starttime = time.time()\n for step in range(self.config['epoch_steps']):\n input_data, target = next(data_iterator)\n tmp, loss_avg_value = session.run([training, loss_avg_t], {input_data_ph:input_data, target_ph:target})\n print(\"Epoch: {} Loss: {} Elapsed:{}s\".format(epoch, loss_avg_value, (time.time() - starttime)))", "def trainer(model,\n optimizer,\n dataset,\n count_of_epoch=5,\n batch_size=64,\n callback=None,\n progress=None):\n iterations = range(count_of_epoch)\n\n if progress is not None:\n iterations = progress(iterations)\n\n for it in iterations:\n\n batch_generator = DataLoader(\n dataset=dataset,\n batch_size=batch_size,\n shuffle=True,\n pin_memory=True)\n\n train_epoch(\n \tmodel=model,\n train_generator=batch_generator,\n optimizer=optimizer,\n callback=callback)\n\n return", "def train(self, iterations=1):\n for _ in range(iterations):\n self.trainer.train()\n self.test_network()", "def __iter__(self):\n for batch in self.iterator:\n yield Batch.from_iterator_batch(batch, self.pad_index, self.sos_index, self.eos_index)", "def _run_one_training_iteration(self) -> Tuple[ResultDict, \"TrainIterCtx\"]:\n # In case we are training (in a thread) parallel to evaluation,\n # we may have to re-enable eager mode here (gets disabled in the\n # thread).\n if self.config.get(\"framework\") == \"tf2\" and not tf.executing_eagerly():\n tf1.enable_eager_execution()\n\n results = None\n # Create a step context ...\n with TrainIterCtx(algo=self) as train_iter_ctx:\n # .. so we can query it whether we should stop the iteration loop (e.g.\n # when we have reached `min_time_s_per_iteration`).\n while not train_iter_ctx.should_stop(results):\n # Try to train one step.\n # TODO (avnishn): Remove the execution plan API by q1 2023\n with self._timers[TRAINING_ITERATION_TIMER]:\n if self.config._disable_execution_plan_api:\n results = self.training_step()\n else:\n results = next(self.train_exec_impl)\n\n # With training step done. 
Try to bring failed workers back.\n self.restore_workers(self.workers)\n\n return results, train_iter_ctx", "def train__iter__(self):\n\n # create worker-specific random number generator\n rng = create_rng_for_worker(self.model.current_epoch)\n\n while True:\n\n # select one file at random (with probability proportional to its annotated duration)\n file, *_ = rng.choices(\n self._train,\n weights=[f[\"duration\"] for f in self._train],\n k=1,\n )\n\n # select one annotated region at random (with probability proportional to its duration)\n segment, *_ = rng.choices(\n file[\"annotated\"],\n weights=[s.duration for s in file[\"annotated\"]],\n k=1,\n )\n\n # select one chunk at random (with uniform distribution)\n start_time = rng.uniform(segment.start, segment.end - self.duration)\n chunk = Segment(start_time, start_time + self.duration)\n\n X, one_hot_y, _ = self.prepare_chunk(file, chunk, duration=self.duration)\n\n y = self.prepare_y(one_hot_y)\n\n yield {\"X\": X, \"y\": y}", "def train(self):\n self.dataGenerator.printDataStatistics()\n sE = len(self.dataGenerator.ids[\"train\"])// 32\n sV = len(self.dataGenerator.ids[\"validation\"])// 32\n self.model.fit_generator(\n generator=self.dataGenerator.trainingGenerator,\n steps_per_epoch= sE,\n epochs=2,\n validation_data=self.dataGenerator.validationGenerator,\n validation_steps=sV,\n # use_multiprocessing=True,\n # workers=2,\n )", "def step_train(self, max_iter):\n nzx, nzy = self.trn_graph.nonzero()\n n = len(self.trn_x_index)\n n_pos = len(nzx)\n for _ in range(max_iter):\n Y_pred, loss, grad_norm = self.train_fn(self.gX, self.hX, self.sym_g, self.sym_h,\n self.trn_graph, self.trn_x_index, self.trn_y_index)\n return Y_pred, loss, grad_norm", "def train(self):\n for i in xrange(self.num_steps):\n if c.ADVERSARIAL:\n # update discriminator\n batch = get_train_batch()\n print 'Training discriminator...'\n self.d_model.train_step(batch, self.g_model)\n\n # update generator\n batch = get_train_batch()\n print 'Training generator...'\n self.global_step = self.g_model.train_step(\n batch, discriminator=(self.d_model if c.ADVERSARIAL else None))\n\n # save the models\n if self.global_step % c.MODEL_SAVE_FREQ == 0:\n print '-' * 30\n print 'Saving models...'\n self.saver.save(self.sess,\n c.MODEL_SAVE_DIR + 'model.ckpt',\n global_step=self.global_step)\n print 'Saved models!'\n print '-' * 30\n\n # test generator model\n if self.global_step % c.TEST_FREQ == 0:\n self.test()", "def train(self, epoch=50):\n # self.history = self.model.fit(self.train_images,\n # self.train_labels,\n # epochs=epoch,\n # validation_data=(self.test_images, self.test_labels))\n datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1,\n horizontal_flip=True)\n # prepare iterator\n it_train = datagen.flow(self.train_images, self.train_labels, batch_size=64)\n # fit model\n steps = int(self.train_images.shape[0] / 64)\n self.history = self.model.fit_generator(it_train, steps_per_epoch=steps,\n epochs=epoch,\n validation_data=(self.test_images,\n self.test_labels),\n verbose=1)\n # evaluate model\n _, acc = self.model.evaluate(self.test_images, self.test_labels, verbose=0)\n LOGGER.info('> %.3f' % (acc * 100.0))\n self.summarize_diagnostics()", "def train_next_batch(self, batch_size=None):", "def __iter__(self):\n return iter((self.train_stats, self.preprocessed_data, self.output_directory))", "def train_generator(self, train, validation=None, epochs=20, class_weight=None):\n history = self.model.fit_generator(\n generator=train, 
validation_data=validation,\n epochs=epochs, shuffle=True, class_weight=class_weight)\n self.training_history.append(\n ({\"epochs\": epochs, \"class_weight\": class_weight}, history)\n )\n self.data_ids = {\n \"train\": train.dataset.labels,\n \"validation\": validation.dataset.labels if validation else [],\n }\n return history", "def train_model(self\n\t\t, epochs=100\n\t\t, minibatch_size=20\n\t\t, yield_every_iteration=False):\n\n\t\tif self.input_batch is None:\n\t\t\traise ValueError(\"Denoising autoencoder must be initialised with \"\n\t\t\t\t\"input data to train model independently.\")\n\t\tif self.output_batch is None:\n\t\t\traise ValueError(\"RMI denoising autoencoder must be initialised \"\n\t\t\t\t\"with output data to train model independently.\")\n\n\t\tbatch_count = self.input_batch.get_value(\n\t\t\tborrow=True).shape[0]//minibatch_size\n\n\t\tfor epoch in xrange(epochs):\n\t\t\tcosts = []\n\t\t\tfor index in xrange(batch_count):\n\t\t\t\tcost = self.train_model_once(index, minibatch_size)\n\t\t\t\tcosts.append(cost)\n\t\t\t\tif yield_every_iteration:\n\t\t\t\t\tyield (index, cost)\n\n\t\t\tif not yield_every_iteration:\n\t\t\t\tyield (epoch, numpy.mean(costs))", "def training_step(self):\n self.iteration += 1\n # if not enough replay memories\n if self.iteration < self.params.min_replays:\n # skip training\n return\n # for each batch\n for _ in range(self.params.num_batches):\n # sample memories\n mem_states, mem_controls, mem_rewards, mem_next_states, mem_continues = \\\n (rl.tools.sample_memories(self.memory, self.params.batch_size))\n # train the critic\n max_q = self.sess.run(self.graph.target_critic_outputs, feed_dict={self.graph.states: mem_next_states})\n td_target = mem_rewards + mem_continues * self.params.discount_factor * max_q\n self.reg_loss_val, self.critic_loss_val, _ = self.sess.run(\n [self.graph.critic_reg_loss, self.graph.critic_loss, self.graph.critic_training_op],\n feed_dict={self.graph.states: mem_states, self.graph.actor_outputs: mem_controls,\n self.graph.td_target: td_target})\n # train the actor\n neg_mean_q_val, _ = self.sess.run([self.graph.neg_mean_q, self.graph.actor_training_op],\n feed_dict={self.graph.states: mem_states})\n self.mean_q_val = -1.0 * neg_mean_q_val\n # copy to target\n self.sess.run(self.graph.copy_online_to_target)", "def train(self, current_hyper_params):\n train_loss = 0\n train_n_iter = 0\n # Set model to train mode\n self.model.train()\n # Iterate over train data\n print(\"Iterating over training data...\")\n for i, batch in enumerate(tqdm(self.train_loader)):\n loss = self._train_batch(batch)\n # Statistics\n train_loss += loss.item()\n train_n_iter += 1\n self.stats.train_loss_history.append(train_loss / train_n_iter)", "def __iter__(self) -> Iterator[Batch]:\n return self.get_iterator()", "def __iter__(self):\n if not self.loading:\n self.reset_loading()\n self.current_batch_index = 0\n return self", "def train(self, bytes_gen: Iterator[bytes] = None, **kwargs):\n self._call_client(bytes_gen, mode='train', **kwargs)", "def _create_train_iterator(self, patch_locations):\n dataset_locations = patch_locations['valid_locations_train']\n\n dataset = tf.data.Dataset.from_tensor_slices(dataset_locations)\n dataset = dataset.map(self._parse_function)\n batched_dataset = dataset.batch(self._settings.batch_size)\n iterator = batched_dataset.make_one_shot_iterator()\n\n return iterator", "def train(self):\n for doc, label in zip(self.train_docs(), self.train_labels()):\n yield doc, label", "def get_train_batch_generator(self, 
size):\n self.shuffle_train()\n while self.train_position + size < len(self.train):\n yield self.unzip_batch(self.train[self.train_position:self.train_position + size])\n self.train_position = self.train_position + size", "def train_epoch(self,batch_iterator, cost_only=False, verbose=True):\n count = 0.\n total_cost = 0.\n \n for step, b in enumerate(batch_iterator):\n count += len(b)\n cost = self.train_batch(b, cost_only)\n\n total_cost += cost * len(b)\n \n if (1+step) % PRINT_FREQ == 0 and verbose:\n m = \" Step {:3d}, cost: {:.4f}, avg cost: {:.4f}\".format(\n step+1, cost, total_cost/count)\n log(m)\n \n if (1+step) > PRINT_FREQ and (1+step) % PRINT_FREQ != 0 and verbose:\n m = \" Step {:3d}, cost: {:.4f}, avg cost: {:.4f}\".format(\n step+1, cost, total_cost/count)\n log(m)\n\n return (total_cost)/(count)", "def train(self):\n\t\tprint(\"Training...\")\n\t\tprev_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\tfor i in range(self.max_iter):\n\t\t\t# gradient descent\n\t\t\tdw0, dw = self.compute_grad(self.w0, self.w, 'train')\n\t\t\tself.w0 -= self.step_size * dw0\n\t\t\tself.w = [wj-self.step_size*dwj for wj, dwj in zip(self.w, dw)]\n\t\t\tcurr_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\t\tif i%(self.max_iter/10)==0:\n\t\t\t\tprint('iteration: {}, loss: {}'.format(i, curr_loss))\n\t\t\tif abs(curr_loss-prev_loss) < self.tolerance:\n\t\t\t\tprint('# of iterations:',i)\n\t\t\t\tbreak\n\t\tself.trained = True\n\t\tprint('Mean log loss of TRAIN data:', curr_loss)", "def trainGenerator(self,):\n return tf.data.Dataset.from_generator(self.trainData, \\\n output_types=(tf.float32, tf.float32, tf.float32), \\\n output_shapes=(tf.TensorShape(self.config_model[\"input_shape\"]), \\\n tf.TensorShape(list(self.headoutput_shape[1:4]) + \\\n [len(self.anchor_boxes), \\\n 7+len(self.config_data[\"all_classes\"])]), \\\n tf.TensorShape([self.config_data[\"max_boxes_per_frame\"], 7]) \\\n ), )", "def train(model, data_iterator, optimizer, scheduler, params, steps_num):\n # set model to training mode\n model.train()\n # scheduler.step()\n # a running average object for loss\n loss_avg = utils.RunningAverage()\n \n # Use tqdm for progress bar\n t = trange(steps_num)\n for _ in t:\n # fetch the next training batch\n batch_data, batch_labels = next(data_iterator)\n\n # compute model output and loss\n batch_output = model(batch_data)\n loss = model.loss(batch_output, batch_labels)\n\n # clear previous gradients, compute gradients of all variables wrt loss\n model.zero_grad()\n # optimizer.zero_grad()\n loss.backward()\n\n # gradient clipping\n nn.utils.clip_grad_norm_(model.parameters(), params.clip_grad)\n\n # performs updates using calculated gradients\n optimizer.step()\n\n # update the average loss\n loss_avg.update(loss.item())\n t.set_postfix(loss='{:05.3f}'.format(loss_avg()))\n\n return loss_avg()", "def self_play_iterator_creator(hparams, num_workers, jobid):\n vocab_table = vocab_utils.create_vocab_tables(hparams.vocab_file)[0]\n data_dataset = tf.data.TextLineDataset(hparams.train_data)\n kb_dataset = tf.data.TextLineDataset(hparams.train_kb)\n skip_count_placeholder = tf.placeholder(shape=(), dtype=tf.int64)\n # this is the actual iterator for supervised training\n train_iterator = iterator_utils.get_iterator(\n data_dataset,\n kb_dataset,\n vocab_table,\n batch_size=hparams.batch_size,\n t1=hparams.t1.encode(),\n t2=hparams.t2.encode(),\n eod=hparams.eod,\n len_action=hparams.len_action,\n random_seed=hparams.random_seed,\n num_buckets=hparams.num_buckets,\n 
max_dialogue_len=hparams.max_dialogue_len,\n skip_count=skip_count_placeholder,\n num_shards=num_workers,\n shard_index=jobid)\n\n # this is the actual iterator for self_play_fulltext_iterator\n data_placeholder = tf.placeholder(\n shape=[None], dtype=tf.string, name=\"src_ph\")\n kb_placeholder = tf.placeholder(shape=[None], dtype=tf.string, name=\"kb_ph\")\n batch_size_placeholder = tf.placeholder(\n shape=[], dtype=tf.int64, name=\"bs_ph\")\n\n dataset_data = tf.data.Dataset.from_tensor_slices(data_placeholder)\n kb_dataset = tf.data.Dataset.from_tensor_slices(kb_placeholder)\n\n self_play_fulltext_iterator = iterator_utils.get_infer_iterator(\n dataset_data,\n kb_dataset,\n vocab_table,\n batch_size=batch_size_placeholder,\n eod=hparams.eod,\n len_action=hparams.len_action,\n self_play=True)\n\n # this is the actual iterator for self_play_structured_iterator\n self_play_structured_iterator = tf.data.Iterator.from_structure(\n tf.data.get_output_types(self_play_fulltext_iterator),\n tf.data.get_output_shapes(self_play_fulltext_iterator))\n iterators = [\n train_iterator, self_play_fulltext_iterator, self_play_structured_iterator\n ]\n\n # this is the list of placeholders\n placeholders = [\n data_placeholder, kb_placeholder, batch_size_placeholder,\n skip_count_placeholder\n ]\n return iterators, placeholders", "def train_step(self, batch, generator):\n ##\n # Split into inputs and outputs\n ##\n\n input_frames = batch[:, :, :, :-3]\n gt_output_frames = batch[:, :, :, -3:]\n\n ##\n # Train\n ##\n\n feed_dict = self.build_feed_dict(input_frames, gt_output_frames, generator)\n\n _, global_loss, global_step, summaries = self.sess.run(\n [self.train_op, self.global_loss, self.global_step, self.summaries],\n feed_dict=feed_dict)\n\n ##\n # User output\n ##\n\n if global_step % c.STATS_FREQ == 0:\n print 'DiscriminatorModel: step %d | global loss: %f' % (global_step, global_loss)\n if global_step % c.SUMMARY_FREQ == 0:\n print 'DiscriminatorModel: saved summaries'\n self.summary_writer.add_summary(summaries, global_step)\n\n return global_step", "def train(self):\r\n \r\n args = self.args\r\n model = self.model\r\n dataset = self.dataset\r\n train_state = self.train_state\r\n optimizer = self.optimizer\r\n scheduler = self.scheduler\r\n train_bar = self.train_bar\r\n val_bar = self.val_bar\r\n epoch_bar = self.epoch_bar\r\n \r\n for epoch_index in range(args.num_epochs):\r\n train_state['epoch_index'] = epoch_index\r\n \r\n # Iterate over training dataset\r\n \r\n running_loss,running_acc = self.train_loop(epoch_index, args, \r\n model, dataset, \r\n optimizer, train_bar)\r\n \r\n train_state['train_loss'].append(running_loss)\r\n train_state['train_acc'].append(running_acc)\r\n \r\n running_loss,running_acc = self.val_loop(epoch_index, args, model, \r\n dataset, optimizer, val_bar)\r\n \r\n \r\n train_state['val_loss'].append(running_loss)\r\n train_state['val_acc'].append(running_acc)\r\n \r\n print(\"Epoch \"+str(epoch_index+1)+\": Running loss=\"+ \\\r\n str(running_loss)+\", Running Acc=\"+str(running_acc))\r\n \r\n train_state = update_train_state(args=args, model=model, \r\n train_state=train_state)\r\n \r\n scheduler.step(train_state['val_loss'][-1])\r\n \r\n if train_state['stop_early']:\r\n break\r\n \r\n train_bar.n = 0\r\n val_bar.n = 0\r\n epoch_bar.set_postfix(best_val=train_state['early_stopping_best_val'] )\r\n epoch_bar.update()\r\n \r\n state_dict = torch.load(train_state['model_filename'])\r\n model.load_state_dict(state_dict)\r\n return model", "def train(self, 
trainset):\n\n if self.epoch == 0:\n input_size = trainset.metadata['input_size']\n n_classes = len(trainset.metadata['targets'])\n self.initialize(input_size, n_classes)\n\n for it in range(self.epoch, self.n_epochs):\n for input, target in trainset:\n self.fprop(input, target)\n self.bprop(input, target)\n self.update()\n self.epoch = self.n_epochs", "def train_epoch(self, train=False):\n # init params\n config = self.config\n writer = self.writer\n train_params = self.get_train_params()\n args = self.args\n # net, net_SP = self.net, self.net_SP\n optimizer, optimizer_SP = self.optimizer, self.optimizer_SP\n\n lr = self.get_learning_rate()\n logging.info(f\"current learning rate: {lr}\")\n\n running_losses = []\n self.save_lists = [\n \"err_q\",\n \"err_t\",\n \"epi_dists\",\n \"relative_poses_cam\",\n \"relative_poses_body\",\n ]\n dict_of_lists_in_train = init_dict_of_lists(config, self.save_lists)\n dict_of_lists_in_val = init_dict_of_lists(config, self.save_lists)\n if_val_in_train_trigger = False\n\n thd_corr = 300\n writer.add_scalar(\"training-lr\", lr, self.n_iter)\n\n # Train one epoch\n for i, sample_train in tqdm(enumerate(self.train_loader)):\n # if training\n if train:\n # eval in training script\n if (\n self.n_iter != 0\n and self.n_iter % config[\"training\"][\"val_interval_in_train\"] == 0\n ):\n if_val_in_train_trigger = True\n if if_val_in_train_trigger:\n logging.info(\n \"+++[Train]+++ Collecting training batch for %s at train step %d\"\n % (args.exper_name, self.n_iter)\n )\n self.net.eval()\n else:\n self.net.train()\n\n # train one batch\n (\n loss_train_out,\n dict_of_lists_in_train,\n clamp_cum,\n ) = self.train_val_batch(\n train_params,\n sample_train,\n True,\n if_val=if_val_in_train_trigger,\n dict_of_lists=dict_of_lists_in_train,\n )\n\n if if_val_in_train_trigger:\n if (\n dict_of_lists_in_train[\"count\"]\n > config[\"training\"][\"val_batches\"]\n ):\n dict_of_lists_in_train = self.flush_dict_of_lists(\n writer, \"training\", self.n_iter, **dict_of_lists_in_train\n )\n if_val_in_train_trigger = False\n else:\n # running_losses.append(loss_train_out)\n print(self.n_iter, \"%.8f\" % loss_train_out)\n self.n_iter += 1\n\n # if testing\n if args.eval and self.n_iter % config[\"training\"][\"val_interval\"] == 0:\n logging.info(\n \"+++[Val]+++ Validating %s at train step %d\"\n % (args.exper_name, self.n_iter)\n )\n self.net.eval()\n assert self.net.training == False\n for j, sample_val in tqdm(enumerate(self.val_loader)):\n # if not self.check_num_of_matches(sample, thd=thd_corr): continue\n logging.info(\"+++[Val]+++ Validating batch %d\" % (j))\n # logging.info(f\"frame_id: {sample_val['frame_ids']}\")\n loss_val_out, dict_of_lists_in_val, _ = self.train_val_batch(\n train_params, sample_val,\n False, if_val=True, dict_of_lists=dict_of_lists_in_val,\n ) ##### check: in order to align val and training\n self.n_iter_val += 1\n if config[\"training\"][\"val_batches\"] != -1 and (\n j > config[\"training\"][\"val_batches\"]\n ): ##### check: how to limit the validation\n break\n print(dict_of_lists_in_val.keys())\n\n ## save valdiation result (dict)\n if len(config[\"exps\"][\"filename\"]) > 3:\n # print(f\"dict_of_lists_in_val: {dict_of_lists_in_val}\")\n def get_dict(key_layer1, key_layer2, dict_of_lists):\n dict_of_array = {}\n for k in key_layer1:\n dict_of_array[k] = np.stack(dict_of_lists[k][key_layer2])\n return dict_of_array\n\n our_name, base_name = (\n config[\"exps\"][\"our_name\"],\n config[\"exps\"][\"base_name\"],\n )\n\n print(f'save 
dict_of_lists_in_val to {config[\"exps\"][\"filename\"]}')\n # save our results\n dict_of_lists = get_dict(\n self.save_lists, our_name, dict_of_lists_in_val\n )\n dict_of_lists[\"epi_dists\"] = dict_of_lists[\"epi_dists\"][:, :10] ### only take part of it\n np.savez(\n f'{self.save_path[:-11]}/{our_name}_{config[\"exps\"][\"filename\"]}',\n **dict_of_lists,\n )\n # save base_name\n dict_of_lists = get_dict(\n self.save_lists, base_name, dict_of_lists_in_val\n )\n dict_of_lists[\"epi_dists\"] = dict_of_lists[\"epi_dists\"][:, :10] ### only take part of it\n np.savez(\n f'{self.save_path[:-11]}/{base_name}_{config[\"exps\"][\"filename\"]}',\n **dict_of_lists,\n )\n # output then flush\n dict_of_lists_in_val = self.flush_dict_of_lists(\n writer, \"validating\", self.n_iter, **dict_of_lists_in_val\n )\n\n # epoch_loss = np.mean(np.asarray(running_losses))\n\n # training iterations\n self.epoch += 1\n if self.n_iter > config[\"training\"][\"train_iter\"]:\n break\n return 0.0, self.clamp_cum, self.n_iter, self.n_iter_val", "def train_step(self):\n pass", "def start_training(self):\n i = 0\n for _ in range(self.train_steps):\n print(f\"Start Training Step {i + 1}\")\n self.model.learn(total_timesteps=self.total_time_steps)\n self.model.save(self.save_path)\n print(f\"Finished Training Step {i + 1}\")\n i += 1", "def train(self):\n # setup model\n self.createModel()\n self.setGenerators()\n self.buildCallbacks()\n self.printParameters()\n \n # train model\n _ = self.model.fit_generator(\n generator = self.trainGen,\n validation_data = self.validateGen,\n steps_per_epoch = self.steps_per_epoch,\n validation_steps = self.validation_steps,\n epochs = self.epochs,\n use_multiprocessing = True,\n callbacks = self.callbacks)\n # clear save paths to avoid overwriting accidentaly\n self.saveName = None", "def __iter__(self):\n batch = []\n for sample in self.dataset:\n batch.append(sample)\n if len(batch) == self.size:\n yield self.transform(batch)\n batch = []\n if batch:\n # the last batch may be less then batch size.\n yield self.transform(batch)", "def _train_loop(\n self,\n batcher,\n progress_tracker: ProgressTracker,\n save_path,\n train_summary_writer,\n progress_bar: LudwigProgressBar,\n training_set,\n validation_set,\n test_set,\n start_time,\n validation_summary_writer,\n test_summary_writer,\n model_hyperparameters_path,\n output_features,\n metrics_names,\n checkpoint_manager: CheckpointManager,\n final_steps_per_checkpoint: int,\n early_stopping_steps: int,\n ) -> bool:\n self.distributed.zero_grad(self.optimizer)\n batch_idx = 0\n while not batcher.last_batch() and progress_tracker.steps < self.total_steps:\n progress_tracker.learning_rate = self.optimizer.param_groups[0][\"lr\"]\n self.callback(lambda c: c.on_batch_start(self, progress_tracker, save_path))\n\n # obtain batch\n batch = batcher.next_batch()\n\n # determine whether we need to accumulate gradients as trigger a full parameter update\n should_sync_grads = (batch_idx + 1) % self.gradient_accumulation_steps == 0\n is_checkpoint_step = (progress_tracker.steps + 1) % final_steps_per_checkpoint == 0\n should_step = should_sync_grads or is_checkpoint_step\n batch_idx += 1\n\n # Move tensors to cuda here.\n inputs = {\n i_feat.feature_name: torch.from_numpy(np.array(batch[i_feat.proc_column], copy=True)).to(self.device)\n for i_feat in self.model.input_features.values()\n }\n targets = {\n o_feat.feature_name: torch.from_numpy(np.array(batch[o_feat.proc_column], copy=True)).to(self.device)\n for o_feat in 
self.model.output_features.values()\n }\n\n loss, all_losses = self.train_step(inputs, targets, should_step=should_step)\n\n # Update LR schduler here instead of train loop to avoid updating during batch size tuning, etc.\n self.scheduler.step()\n\n if self.is_coordinator() and not self.skip_save_log:\n self.write_step_summary(\n train_summary_writer=train_summary_writer,\n combined_loss=loss.detach().float(),\n all_losses=all_losses,\n step=progress_tracker.steps,\n learning_rate=progress_tracker.learning_rate,\n )\n\n progress_tracker.steps += 1\n progress_bar.set_postfix({\"loss\": float(loss)})\n progress_bar.update(1)\n if self.is_coordinator():\n logger.debug(\n f\"training: completed batch {progress_bar.total_steps} \"\n f\"memory used: \"\n f\"{psutil.Process(os.getpid()).memory_info()[0] / 1e6:0.2f}MB\"\n )\n\n # Executing `on_batch_end` calls before `run_evaluation` enables more accurate\n # batch duration measurements when using timer callbacks.\n self.callback(lambda c: c.on_batch_end(self, progress_tracker, save_path, sync_step=should_step))\n\n if progress_tracker.steps % final_steps_per_checkpoint == 0:\n if not self.skip_all_evaluation:\n should_break = self.run_evaluation(\n training_set,\n validation_set,\n test_set,\n progress_tracker,\n train_summary_writer,\n validation_summary_writer,\n test_summary_writer,\n model_hyperparameters_path,\n output_features,\n metrics_names,\n save_path,\n loss,\n all_losses,\n early_stopping_steps,\n checkpoint_manager,\n )\n else:\n should_break = False\n\n # Checkpoint the model.\n # NOTE: Ideally we would do this before evaluation, but for some reason DeepSpeed will complain\n # about inflight params if we do that, which is why we checkpoint after eval instead. In practice,\n # this should not make a difference, xcept in the unlikely event an error occurs during eval and we\n # want to resume from the last checkpoint, in which case we will lose slightly more progress this way.\n if not self.skip_save_progress:\n checkpoint_manager.save(progress_tracker.steps)\n if self.is_coordinator():\n progress_tracker.save(os.path.join(save_path, TRAINING_PROGRESS_TRACKER_FILE_NAME))\n\n if should_break:\n return should_break\n\n return False", "def __iter__(self):\n while True:\n if self.stop:\n return\n for item in self.get_next_batch():\n yield item", "def train(self, max_epochs: int=100) \\\n -> Generator[Tuple[float, float, int], bool, None]:\n assert self.tf_init_done, \"Must call .init_tf() first!\"\n\n tr = tqdm.trange(max_epochs, desc='epoch', leave=True)\n mean_loss = None\n\n for epoch_num in tr:\n # only extend replay by a bit each time\n succ_rates = self._extend_replays(max(25 // len(self.problems), 1))\n succ_rate = np.mean(succ_rates)\n replay_sizes = self._get_replay_sizes()\n replay_size = sum(replay_sizes)\n tr.set_postfix(\n succ_rate=succ_rate, net_loss=mean_loss, states=replay_size)\n self._log_op_value('succ-rate', succ_rate)\n self._log_op_value('replay-size', replay_size)\n # do a few batches of SGD (should keep us close to convergence)\n mean_loss = self._optimise(300)\n tr.set_postfix(\n succ_rate=succ_rate, net_loss=mean_loss, states=replay_size)\n keep_going = yield succ_rate, mean_loss, replay_size\n if not keep_going:\n print('.train() terminating early')\n break", "def _get_train_generator(self):\n while(True):\n random.shuffle(self.train)\n for data_element in self.train:\n if self.debug: \n print(\"training on: {}\".format(data_element))\n\n image, heatmap = self._generate_input_tuple(data_element)\n\n if 
self.debug: \n print(\"yields: {}\".format(data_element))\n\n yield (image, heatmap)", "def run_train_step(self, sess, batch):\n feed_dict = self._make_feed_dict(batch)\n to_return = {\n 'train_op': self._train_op,\n 'summaries': self._summaries,\n 'loss': self._loss,\n 'logits': self._logits,\n 'global_step': self.global_step,\n }\n\n return sess.run(to_return, feed_dict)", "def train(model, optimizer, train_iterator, test_iterator=None, loss=nn.NLLLoss(), epochs=20):\n\n\ttrain_losses = list()\n\ttest_losses = list()\n\n\tfor epoch in range(epochs):\n\n\t\ttl = list()\n\t\tel = list()\n\n\t\t\"\"\" Training the model \"\"\"\n\t\tmodel.train()\n\t\tfor c, t in tqdm(train_iterator):\n\t\t\tc = c.to(device)\n\t\t\tt = t.to(device)\n\n\t\t\toptimizer.zero_grad()\n\t\t\tpred = model(c)\n\t\t\t_l = loss(pred, t)\n\t\t\t_l.backward()\n\t\t\toptimizer.step()\n\n\t\t\ttl.append(_l.item())\n\n\t\tif test_iterator is not None:\n\t\t\t\"\"\" Evaluating the model \"\"\"\n\t\t\tmodel.eval()\n\t\t\tfor c, t in tqdm(test_iterator):\n\t\t\t\t\n\t\t\t\twith torch.no_grad():\n\t\t\t\t\tc = c.to(device)\n\t\t\t\t\tt = t.to(device)\n\n\t\t\t\t\tpred = model(c)\n\t\t\t\t\t_l = loss(pred, t)\n\t\t\t\t\tel.append(_l.item())\n\n\n\t\ttl = np.mean(tl)\n\t\ttrain_losses.append(tl)\n\t\tif test_iterator is not None:\n\t\t\tel = np.mean(el)\n\t\t\ttest_losses.append(el)\n\n\t\t\tprint(f'Epoch : {epoch+1} / {epochs}, Train Loss : {tl}, Test Loss : {el}')\n\n\t\telse:\n\t\t\tprint(f'Epoch : {epoch+1} / {epochs}, Train Loss : {tl}')\n\n\n\treturn train_losses, test_losses", "def _do_training(self, iteration, batch):\n\n feed_dict = self._get_feed_dict(iteration, batch)\n\n self._session.run(self._training_ops, feed_dict)\n\n if iteration % self._target_update_interval == 0:\n # Run target ops here.\n self._update_target()", "def train(model, train_iter, dev_iter):\n \n optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)\n\n loss = []\n\n for eidx in range(1, NUM_EPOCHS + 1):\n model.train()\n\n epoch_loss = 0\n epoch_items = 0\n # Start training\n for batch_idx, batch in enumerate(train_iter):\n\n # Clear the gradients\n optim.zero_grad()\n en, de = prepare_batch_strategy_3(batch)\n # loss will be a vector of size (batch_size, ) with losses per every sample\n y_pred = model.forward(en.float(), de.float()).view(-1)\n loss = F.mse_loss(y_pred.double(), batch.score.double(), reduction='none')\n \n # Backprop the average loss and update parameters\n loss.sum().backward()\n optim.step()\n\n # sum the loss for reporting, along with the denominator\n epoch_loss += loss.detach().sum()\n epoch_items += loss.numel()\n\n if batch_idx % 10 == 0:\n # Print progress\n loss_per_token = epoch_loss / epoch_items\n print('[Epoch {:<3}] loss: {:6.2f}'.format(eidx, loss_per_token))\n\n\n print('\\n[Epoch {:<3}] ended with train_loss: {:6.2f}'.format(eidx, loss_per_token))\n \n # Evaluate on valid set\n model.eval()\n check_pearson_value(model, dev_iter)\n torch.save(model, './models/model_strategy_3_epoch_{}'.format(eidx))", "def train(self, training_data):\n pass", "def trainingset(self, batchsize=None, flatten=True):\n if batchsize is None:\n batchsize = self.batchsize\n\n if self.x_train is None:\n raise AttributeError('No fold initialized... 
Try calling next_leaveout')\n\n return self.GENERATOR(self.x_train, self.y_train, batchsize, flatten=flatten, evaluate=False)", "def train(self):\n for epoch in range(self.current_epoch, self.config.optim.epochs):\n self.current_epoch = epoch\n self.train_one_epoch()\n if epoch % self.config.optim.val_freq == 0:\n self.validate()\n if self.config.optim.auto_schedule:\n self.scheduler.step(self.current_val_loss)\n self.save_checkpoint()", "def train_epoch(model, data_iterator, optimizer, scheduler, params):\n # set model to training mode\n model.train()\n\n # a running average object for loss\n loss_avg = utils.RunningAverage()\n \n # Use tqdm for progress bar\n one_epoch = trange(params.train_steps)\n for batch in one_epoch:\n # fetch the next training batch\n batch_data, batch_token_starts, batch_tags = next(data_iterator)\n batch_masks = batch_data.gt(0) # get padding mask\n\n # compute model output and loss\n loss = model((batch_data, batch_token_starts), token_type_ids=None, attention_mask=batch_masks, labels=batch_tags)[0]\n\n # clear previous gradients, compute gradients of all variables wrt loss\n model.zero_grad()\n loss.backward()\n\n # gradient clipping\n nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=params.clip_grad)\n\n # performs updates using calculated gradients\n optimizer.step()\n scheduler.step()\n\n # update the average loss\n loss_avg.update(loss.item())\n one_epoch.set_postfix(loss='{:05.3f}'.format(loss_avg()))", "def __iter__(self):\n self.current_epoch = self.current_epoch + 1\n self.num_samples_yield = -1\n return self", "def __iter__(self):\n self.current_epoch = self.current_epoch + 1\n self.num_samples_yield = -1\n return self", "def __next__(self) -> Union[None, Tuple[int, Dict[str, Any], Dict[str, Any]]]:\n self.epoch += 1\n self.iter_num += 1\n\n if self.iter_num > 1:\n\n # iterator exhaustion check\n if self.epoch > self.max_epoch:\n raise StopIteration\n\n # exit flag 1, when stop_fn succeeds in train_step or test_step\n if self.stop_fn_flag:\n raise StopIteration\n\n # set policy in train mode\n self.policy.train()\n\n epoch_stat: Dict[str, Any] = dict()\n\n if self.show_progress:\n progress = tqdm.tqdm\n else:\n progress = DummyTqdm\n\n # perform n step_per_epoch\n with progress(\n total=self.step_per_epoch, desc=f\"Epoch #{self.epoch}\", **tqdm_config\n ) as t:\n while t.n < t.total and not self.stop_fn_flag:\n data: Dict[str, Any] = dict()\n result: Dict[str, Any] = dict()\n if self.train_collector is not None:\n data, result, self.stop_fn_flag = self.train_step()\n t.update(result[\"n/st\"])\n if self.stop_fn_flag:\n t.set_postfix(**data)\n break\n else:\n assert self.buffer, \"No train_collector or buffer specified\"\n result[\"n/ep\"] = len(self.buffer)\n result[\"n/st\"] = int(self.gradient_step)\n t.update()\n\n self.policy_update_fn(data, result)\n t.set_postfix(**data)\n\n if t.n <= t.total and not self.stop_fn_flag:\n t.update()\n\n # for offline RL\n if self.train_collector is None:\n self.env_step = self.gradient_step * self.batch_size\n\n if not self.stop_fn_flag:\n self.logger.save_data(\n self.epoch, self.env_step, self.gradient_step, self.save_checkpoint_fn\n )\n # test\n if self.test_collector is not None:\n test_stat, self.stop_fn_flag = self.test_step()\n if not self.is_run:\n epoch_stat.update(test_stat)\n\n if not self.is_run:\n epoch_stat.update({k: v.get() for k, v in self.stat.items()})\n epoch_stat[\"gradient_step\"] = self.gradient_step\n epoch_stat.update(\n {\n \"env_step\": self.env_step,\n \"rew\": 
self.last_rew,\n \"len\": int(self.last_len),\n \"n/ep\": int(result[\"n/ep\"]),\n \"n/st\": int(result[\"n/st\"]),\n }\n )\n info = gather_info(\n self.start_time, self.train_collector, self.test_collector,\n self.best_reward, self.best_reward_std\n )\n return self.epoch, epoch_stat, info\n else:\n return None", "def run_training_loop():\n logging.info(\"Starting the training loop.\")\n\n trainer = trainer_class(\n output_dir=output_dir,\n train_env=train_env,\n eval_env=eval_env,\n trajectory_dump_dir=trajectory_dump_dir,\n )\n trainer.training_loop(n_epochs=n_epochs)", "def next(self):\n if self._curr_batch + 1 > self.num_batches:\n # no more batches in current iteration through data set so start\n # new epoch ready for another pass and indicate iteration is at end\n self.new_epoch()\n raise StopIteration()\n # create an index slice corresponding to current batch number\n batch_slice = slice(self._curr_batch * self.batch_size,\n (self._curr_batch + 1) * self.batch_size)\n inputs_batch = self.inputs[batch_slice]\n targets_batch = self.targets[batch_slice]\n # target_ids_global = self.target_ids[batch_slice]\n target_ids_batch = self.target_ids[batch_slice]\n self._curr_batch += 1\n\n batch_inputs, batch_target_ids, batch_targets = \\\n self.transform_batch(inputs_batch, target_ids_batch, targets_batch)\n\n return batch_inputs, batch_targets, batch_target_ids", "def data_iterator(self, ithFileReader):\n print('data_iterator', ithFileReader, threading.current_thread())\n while True:\n sampX, sampY = self.sampleTrain(ithFileReader) if self.config.is_train else self.sampleValid(ithFileReader)\n yield sampX, sampY", "def train(self, ckpt=None, verbose=True):\n\t\t\n\t\tsess = self.sess\n\t\tdatasource = self.datasource\n\n\t\tif FLAGS.resume:\n\t\t\tif ckpt is None:\n\t\t\t\tckpt = tf.train.latest_checkpoint(FLAGS.logdir)\n\t\t\tself.saver.restore(sess, ckpt)\n\t\tsess.run(self.init_op)\n\n\t\tt0 = time.time()\n\t\ttrain_dataset = datasource.get_dataset('train')\n\t\ttrain_dataset = train_dataset.batch(FLAGS.batch_size)\n\t\ttrain_dataset = train_dataset.shuffle(buffer_size=10000)\n\t\ttrain_iterator = train_dataset.make_initializable_iterator()\n\t\tnext_train_batch = train_iterator.get_next()\n\n\t\tvalid_dataset = datasource.get_dataset('valid')\n\t\tvalid_dataset = valid_dataset.batch(FLAGS.batch_size)\n\t\tvalid_iterator = valid_dataset.make_initializable_iterator()\n\t\tnext_valid_batch = valid_iterator.get_next()\n\n\t\tself.train_writer = tf.summary.FileWriter(FLAGS.outdir + '/train', graph=tf.get_default_graph())\n\t\tself.valid_writer = tf.summary.FileWriter(FLAGS.outdir + '/valid', graph=tf.get_default_graph())\n\n\t\tepoch_train_losses = []\n\t\tepoch_valid_losses = []\n\t\tepoch_save_paths = []\n\n\t\tfor epoch in range(FLAGS.n_epochs):\n\t\t\tsess.run(train_iterator.initializer)\n\t\t\tsess.run(valid_iterator.initializer)\n\t\t\tepoch_train_loss = 0.\n\t\t\tnum_batches = 0.\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\tself.training = True\n\t\t\t\t\tif (not self.is_binary) and (self.datasource.target_dataset != 'celebA'):\n\t\t\t\t\t\tx = sess.run(next_train_batch)[0]\n\t\t\t\t\telse:\n\t\t\t\t\t\t# no labels available for binarized MNIST\n\t\t\t\t\t\tx = sess.run(next_train_batch)\n\t\t\t\t\tif self.noisy_mnist:\n\t\t\t\t\t\t# print('training with noisy MNIST...')\n\t\t\t\t\t\tfeed_dict = {self.x: (x + np.random.normal(0, 0.5, x.shape)), self.true_x: x}\n\t\t\t\t\telse:\n\t\t\t\t\t\tfeed_dict = {self.x: x}\n\n\t\t\t\t\t# REINFORCE-style training with VIMCO or vanilla gradient 
update\n\t\t\t\t\tif not self.discrete_relax:\n\t\t\t\t\t\tsess.run([self.discrete_train_op1, self.discrete_train_op2], feed_dict)\n\t\t\t\t\telse:\n\t\t\t\t\t\t# this works for both gumbel-softmax\n\t\t\t\t\t\tsess.run(self.train_op, feed_dict)\n\n\t\t\t\t\tbatch_loss, train_summary, gs = sess.run([\n\t\t\t\t\t\tself.reconstr_loss, self.summary_op, self.global_step], feed_dict)\n\t\t\t\t\tepoch_train_loss += batch_loss\n\n\t\t\t\t\t# self.train_writer.add_summary(train_summary, gs)\n\t\t\t\t\tnum_batches += 1\n\n\t\t\t\texcept tf.errors.OutOfRangeError:\n\t\t\t\t\tbreak\n\t\t\t# end of training epoch; adjust temperature here if using Gumbel-Softmax\n\t\t\t# if self.discrete_relax:\n\t\t\t# \tif (counter % 1000 == 0) and (counter > 0):\n\t\t\t# \t\tself.adj_temp = np.maximum(self.tau * np.exp(-self.anneal_rate * counter), self.min_temp)\n\t\t\t# \t\tprint('adjusted temperature to: {}'.format(self.adj_temp))\n\t\t\t# enter validation phase\n\t\t\tif verbose:\n\t\t\t\tepoch_train_loss /= num_batches\n\t\t\t\tself.training = False\n\t\t\t\tif (not self.is_binary) and (self.datasource.target_dataset != 'celebA'):\n\t\t\t\t\tx = sess.run(next_valid_batch)[0]\n\t\t\t\telse:\n\t\t\t\t\t# no labels available for binarized MNIST and celebA\n\t\t\t\t\tx = sess.run(next_valid_batch)\n\t\t\t\tif self.noisy_mnist:\n\t\t\t\t\t# print('training with noisy MNIST...')\n\t\t\t\t\tfeed_dict = {self.x: (x + np.random.normal(0, 0.5, x.shape)), self.true_x: x}\n\t\t\t\telse:\n\t\t\t\t\tfeed_dict = {self.x: x}\n\n\t\t\t\t# save run stats\n\t\t\t\tepoch_valid_loss, valid_summary, gs = sess.run([self.test_loss, self.summary_op, self.global_step], feed_dict=feed_dict)\n\t\t\t\tif epoch_train_loss < 0: # note: this only applies to non-binary data since it's L2 loss\n\t\t\t\t\tprint('Epoch {}, (no sqrt) l2 train loss: {:0.6f}, l2 valid loss: {:0.6f}, time: {}s'. \\\n\t\t\t\tformat(epoch+1, epoch_train_loss, np.sqrt(epoch_valid_loss), int(time.time()-t0)))\n\t\t\t\telse:\n\t\t\t\t\tprint('Epoch {}, l2 train loss: {:0.6f}, l2 valid loss: {:0.6f}, time: {}s'. 
\\\n\t\t\t\t\t\t\tformat(epoch+1, np.sqrt(epoch_train_loss), np.sqrt(epoch_valid_loss), int(time.time()-t0)))\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tsave_path = self.saver.save(sess, os.path.join(FLAGS.logdir, 'model.ckpt'), global_step=gs)\n\t\t\t\tepoch_train_losses.append(epoch_train_loss)\n\t\t\t\tepoch_valid_losses.append(epoch_valid_loss)\n\t\t\t\tepoch_save_paths.append(save_path)\n\t\tbest_ckpt = None\n\t\tif verbose:\n\t\t\tmin_idx = epoch_valid_losses.index(min(epoch_valid_losses))\n\t\t\tprint('Restoring ckpt at epoch', min_idx+1,'with lowest validation error:', epoch_save_paths[min_idx])\n\t\t\tbest_ckpt = epoch_save_paths[min_idx]\n\t\treturn (epoch_train_losses, epoch_valid_losses), best_ckpt", "def train(self, data_loader):\n step = 0\n train_data, valid_data = data_loader()\n\n # Allow to call `next` builtin indefinitely.\n valid_data = iter(valid_data.repeat())\n\n for epoch in range(self.hparams.num_epochs):\n for x, y in train_data:\n\n with tf.GradientTape() as g:\n train_loss = self.loss(y, self(x))\n\n grads = g.gradient(train_loss, self.trainable_variables)\n self.optimizer.apply_gradients(zip(grads, self.trainable_variables))\n\n # Validate every 1000 training steps.\n if step % 1000 == 0:\n x, y = next(valid_data)\n valid_loss = self.loss(y, self(x))\n print(\n f\"step {step} (train_loss={train_loss} valid_loss={valid_loss})\"\n )\n step += 1\n\n print(f\"epoch {epoch} finished\")\n self.save()", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()", "def train(self, ):\n raise NotImplementedError", "def train(self):\n raise NotImplementedError", "def _create_test_iterator(self):\n input_ids = tf.range(self.left_images.shape[0])\n dataset = tf.data.Dataset.from_tensor_slices(input_ids)\n # NOTE: Loads 1 sample, i.e. 
batch mode not implemented yet.\n dataset = dataset.map(self._test_parse_function)\n iterator = dataset.make_one_shot_iterator()\n\n return iterator", "def train(train_iter, test_iter, net, loss, trainer, ctx, num_epochs):\n # 加入学习曲线显示\n weight_url = '//home//jim//shanghai_index//data//weights.params'\n logdir_url = '//home//jim//shanghai_index//log'\n sw = SummaryWriter(logdir = logdir_url, flush_secs=10)\n # 提取已有参数\n if os.path.exists(weight_url):\n print(u'已含有旧权重文件,正在载入继续训练并更新')\n net.load_parameters(weight_url, allow_missing = True, ignore_extra = True)\n # 训练\n for epoch in range(num_epochs):\n train_loss, train_acc, train_step, start = 0.0, 0.0, 0, time.time()\n for x1, x2, y in train_iter:\n batch_size = x1.shape[0]\n with autograd.record():\n y_hats = net(x1, x2)\n print('y_hat = {}'.format(y_hats))\n print('y = {}'.format(y))\n ls = loss(y_hats, y)\n print('loss:{}'.format(ls))\n ls.backward()\n trainer.step(batch_size)\n train_loss += np.mean(ls.asnumpy())\n train_acc += acc(y_hats, y)\n train_step += 1\n \n print('epoch {}, loss {}, train acc {}, time {} sec'.format(epoch + 1,\n train_loss/train_step,\n train_acc/train_step,\n time.time() - start))\n # 向tensorboard填数据\n sw.add_scalar(tag = 'Loss_and_acc', \\\n value = {'train_loss': train_loss/train_step, 'train_acc': train_acc/train_step}, \\\n global_step = epoch)\n # 加入某个层权重分布变化等高图\n grads = [i.grad() for i in net.collect_params('.*weight|.*bias').values()]\n param_names = [name for name in net.collect_params('.*weight|.*bias').keys()]\n assert len(grads) == len(param_names)\n # logging the gradients of parameters for checking convergence\n for i, name in enumerate(param_names):\n sw.add_histogram(tag = name, values = grads[i], global_step = epoch, bins = 20)\n\n # 加入保存参数\n net.save_parameters(weight_url)", "def train_and_evaluate(model, train_dataloader, test_dataloader, optimizer, scheduler, loss_fn, total_epochs):\n\n for epoch in range(total_epochs):\n\n # Run one epoch for both train and test\n print(\"Epoch {}/{}\".format(epoch + 1, total_epochs))\n\n # compute number of batches in one epoch(one full pass over the training set)\n train(model, optimizer, loss_fn, train_dataloader, epoch)\n \n scheduler.step()\n\n # Evaluate for one epoch on test set\n eval(model, loss_fn, test_dataloader, epoch)", "def Train(self):\n self.init_epoch = self.epoch\n if self.epoch >= self.params.num_epoch:\n WARNING('Num_epoch should be smaller than current epoch. Skip training......\\n')\n else:\n for _ in range(self.epoch, self.params.num_epoch):\n self.epoch += 1\n print('-' * 20 + 'Epoch.' 
+ str(self.epoch) + '-' * 20)\n\n # train one epoch\n self.train_one_epoch()\n\n # should display\n if self.epoch % self.params.display == 0:\n print('\\tTrain loss: %.4f' % self.train_loss[-1])\n\n # should save\n if self.params.should_save:\n if self.epoch % self.params.save_every == 0:\n self.save_checkpoint()\n\n # test every params.test_every epoch\n if self.params.should_val:\n if self.epoch % self.params.val_every == 0:\n self.val_one_epoch()\n print('\\tVal loss: %.4f' % self.val_loss[-1])\n\n # adjust learning rate\n self.adjust_lr()\n self.train_one_epoch_Image_display() \n \n # save the last network state\n if self.params.should_save:\n self.save_checkpoint()\n\n # train visualization\n self.plot_curve()", "def training_loop(self):\n # Get correct train function based on parameters.\n self.get_train_step_functions()\n\n for self.growth_idx in range(self.num_growths):\n self.growth_step = 0\n self.block_idx = (self.growth_idx + 1) // 2\n print(\n \"\\nblock_idx = {}, growth_idx = {}\".format(\n self.block_idx, self.growth_idx\n )\n )\n print(\n \"\\ngenerator_model = {}\".format(\n self.network_objects[\"generator\"].models[self.growth_idx].summary()\n )\n )\n print(\n \"\\ndiscriminator_model = {}\".format(\n self.network_objects[\"discriminator\"].models[self.growth_idx].summary()\n )\n )\n\n global_batch_size = (\n self.global_batch_size_schedule[self.block_idx]\n )\n steps_per_epoch = (\n self.params[\"train_dataset_length\"] // global_batch_size\n )\n\n for epoch in range(self.params[\"num_epochs\"]):\n print(\"\\ngrowth_idx = {}, epoch = {}\".format(self.growth_idx, epoch))\n self.previous_timestamp = tf.timestamp()\n\n self.epoch_step = 0\n while self.epoch_step < steps_per_epoch:\n # Train discriminator.\n (growth_phase_complete,\n features,\n labels) = self.network_model_training_steps(\n epoch=epoch,\n train_step_fn=self.discriminator_train_step_fn,\n train_steps=self.params[\"discriminator_train_steps\"],\n train_dataset_iter=(\n self.train_datasets[self.block_idx]\n ),\n features=None,\n labels=None\n )\n\n if growth_phase_complete:\n break # break while loop\n\n # Train generator.\n (growth_phase_complete,\n _,\n _) = self.network_model_training_steps(\n epoch=epoch,\n train_step_fn=self.generator_train_step_fn,\n train_steps=self.params[\"generator_train_steps\"],\n train_dataset_iter=None,\n features=features,\n labels=labels\n )\n\n if growth_phase_complete:\n break # break while loop\n\n if growth_phase_complete:\n break # break epoch for loop\n\n if self.params[\"export_every_growth_phase\"]:\n self.export_saved_model()", "def train(args, trainer, epoch_itr):\n\n # Initialize data iterator\n itr = epoch_itr.next_epoch_itr()\n\n # update parameters every N batches\n if epoch_itr.epoch <= len(args.update_freq):\n update_freq = args.update_freq[epoch_itr.epoch - 1]\n else:\n update_freq = args.update_freq[-1]\n\n max_update = args.max_update or math.inf\n num_batches = len(epoch_itr)\n torch.cuda.synchronize()\n begin = time.time()\n\n # reset meters\n DLLogger.flush()\n trainer.get_throughput_meter().reset()\n\n for i, sample in enumerate(itr):\n if i < num_batches - 1 and (i + 1) % update_freq > 0:\n # buffer updates according to --update-freq\n trainer.train_step(sample, update_params=False, last_step=(i == len(itr)-1))\n continue\n else:\n trainer.train_step(sample, update_params=True, last_step=(i == len(itr)-1))\n\n # ignore the first mini-batch in words-per-second calculation\n if i == 0:\n trainer.get_throughput_meter().reset()\n reset_perf_meters()\n\n if 
(i+1) % args.log_interval == 0:\n DLLogger.flush()\n\n if trainer.get_num_updates() >= max_update:\n break\n\n torch.cuda.synchronize()\n print('Epoch time:', time.time() - begin)\n\n # Print epoch stats and reset training meters\n DLLogger.log(step=trainer.get_num_updates(),\n data={'speed': trainer.get_throughput_meter().avg}, verbosity=0)\n DLLogger.flush()", "def train(self, num_training_steps):\n observation = self.env_pool.reset()\n for step in trange(num_training_steps):\n observations, actions, rewards, dones = self.collect_batch(observation)\n policy_loss, value_loss, entropy = self.train_on_batch(observations, actions, rewards, dones)\n self.writer.add_scalar('policy_loss', policy_loss, step)\n self.writer.add_scalar('value_loss', value_loss, step)\n self.writer.add_scalar('entropy', entropy, step)\n observation = observations[-1]", "def train(self, training, epochs, group):\n for epoch in range(epochs):\n self.input_matrix={}\n self.back_propagation_learning(training)\n acc = accuracy(self, group)\n print(\"Accuracy on epoch {} is {} \".format(epoch, acc))", "def _train(self):\n self.train_acc.reset_states()\n self.val_acc.reset_states()\n self.train_loss.reset_states()\n self.val_loss.reset_states()\n\n self.train_ds.shuffle(buffer_size=1000)\n for idx, (x,y) in enumerate(self.train_ds):\n self.tf_train_step(x, y)\n\n for x,y in self.val_ds:\n self.tf_val_step(x, y)\n\n # It is important to return tf.Tensors as numpy objects.\n return {\n \"epoch\": self.iteration,\n \"loss_train\": self.train_loss.result().numpy(),\n \"loss_val\": self.val_loss.result().numpy(),\n \"acc_train\": self.train_acc.result().numpy(),\n \"acc_val\": self.val_acc.result().numpy(),\n }", "def train(self, train_ids_file):\n\t\t# TODO(student): Feel free to remove if you do not use.\n\t\ttf.keras.backend.set_learning_phase(0)\n\t\tfile_pairs = tf.data.Dataset.zip(get_filename_data_readers(train_ids_file, True))\n\t\t#training_dataset = file_pairs.shuffle(buffer_size=2500).map(read_image_pair_with_padding).batch(self.batch_size)\n\t\ttraining_dataset = file_pairs.map(read_image_pair_with_padding).batch(self.batch_size)\n\t\ttraining_dataset_X = training_dataset.map(lambda a, b: a)\n\t\ttraining_dataset_Y = training_dataset.map(lambda a, b: b)\n\n\t\ttraining_iterator_X = training_dataset_X.make_initializable_iterator()\n\t\ttraining_iterator_Y = training_dataset_Y.make_initializable_iterator()\n\n\t\tfor i in range(5):\n\t\t\tself.sess.run(training_iterator_X.initializer)\n\t\t\tself.sess.run(training_iterator_Y.initializer)\n\t\t\ttraining_handle_X = self.sess.run(training_iterator_X.string_handle())\n\t\t\ttraining_handle_Y = self.sess.run(training_iterator_Y.string_handle())\n\t\t\tj = 0\n\t\t\tloss_ary = []\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\t[train_loss, train_step, train_pred] = self.sess.run(\n\t\t\t\t\t\t[self.loss, self.train_op, self.pred],\n\t\t\t\t\t\t\tfeed_dict={\n\t\t\t\t\t\t\t\tself.is_training: True,\n\t\t\t\t\t\t\t\tself.handle_X: training_handle_X,\n\t\t\t\t\t\t\t\tself.handle_Y: training_handle_Y,\n\t\t\t\t\t\t\t}\n\t\t\t\t\t)\n\t\t\t\t\t'''\n\t\t\t\t\tif j == 0:\n\t\t\t\t\t\tplt.imshow(train_pred[0])\n\t\t\t\t\t\tplt.colorbar()\n\t\t\t\t\t\tplt.show()\n\t\t\t\t\t\tpdb.set_trace()\n\t\t\t\t\t'''\n\t\t\t\t\tloss_ary.append(train_loss)\n\t\t\t\t\tj += 1\n\t\t\t\t\tprint('Epoch', i, 'Batch', j, train_loss)\n\t\t\t\texcept tf.errors.OutOfRangeError:\n\t\t\t\t\tbreak\n\t\t\tself.sess.run(self.lr_decay_op)\n\t\t\tprint('Apply lr decay, new lr: %f' % 
self.sess.run(self.lr))\n\t\t\tprint(f'Epoch: {i}, Avg Loss: {np.mean(loss_ary)}')\n\t\t\tself.save(\"model_file_no_{}.pickle\".format(i))\n\n\n\t\tprint('Done training')", "def train_input_fn():\n # Initialize `iterator` with training data.\n train_filenames = [os.path.join(FLAGS.out_dir, FLAGS.train_records)]\n return _input_fn(train_filenames, FLAGS.epochs, FLAGS.batch_size, shuffle=True)", "def __iter__(self):\n for run in self.runs:\n yield run", "def train(self, train_loader):\n\n self.model.train()\n with torch.enable_grad():\n return self._iteration(train_loader)", "def train(self, sample_list, epochs=20, iterations=None, callbacks=[]):\n # Initialize Keras Data Generator for generating batches\n dataGen = DataGenerator(sample_list, self.preprocessor, training=True,\n validation=False, shuffle=self.shuffle_batches,\n iterations=iterations)\n # Run training process with Keras fit\n self.model.fit(dataGen,\n epochs=epochs,\n callbacks=callbacks,\n workers=self.workers,\n max_queue_size=self.batch_queue_size)\n # Clean up temporary files if necessary\n if self.preprocessor.prepare_batches or self.preprocessor.prepare_subfunctions:\n self.preprocessor.data_io.batch_cleanup()", "def train_epoch(self) -> None:\n ct = self.config.training\n total_games = self._get_total_games()\n print(f\"Total Games: {total_games}\")\n train_size = int(0.9 * total_games)\n dataset_wrapper = DatasetWrapper(self.config)\n self.agent.model.fit(\n dataset_wrapper.get_dataset(train_size),\n epochs=ct.epoch_to_checkpoint,\n validation_data=dataset_wrapper.get_dataset(train_size, is_training=False),\n )", "def train_step(self):\r\n batch_images = next(self.data_loader.next_batch())\r\n _, loss, summary, ea = self.sess.run([self.model.train_op, self.model.total_loss, self.model.merged, self.model.euclidean_a_p],\r\n feed_dict={self.model.input: batch_images, self.model.is_training: True})\r\n \r\n return loss, summary", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train_model(model, epochs, optimizer, loss_function, train_iterator, valid_iterator):\n for epoch in range(epochs):\n model.train()\n train_loss = 0.0\n train_acc = 0.0\n for i, batch in enumerate(train_iterator):\n (feature, batch_length), label = batch.overview, batch.genre\n batch_length = batch_length.to('cpu')\n label = label.float()\n optimizer.zero_grad()\n\n output = model(feature, batch_length)\n\n loss = loss_function(output, label)\n acc = model_accuracy(output, label)\n\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n train_acc += acc.item()\n print(\n f\"Train:: Epoch: {epoch}, Loss: {train_loss / len(train_iterator)}, Accuracy: {train_acc / len(train_iterator)}\")\n\n model.eval()\n val_loss = 0.0\n val_acc = 0.0\n for i, batch in enumerate(valid_iterator):\n (feature, batch_length), label = batch.overview, batch.genre\n batch_length = batch_length.to('cpu')\n label = label.float()\n\n output = model(feature, batch_length)\n loss = loss_function(output, label)\n acc = model_accuracy(output, label)\n\n val_loss += loss.item()\n val_acc += acc.item()\n\n print(\n f\"Validation:: Epoch: {epoch}, Loss: {val_loss / len(valid_iterator)}, Accuracy: {val_acc / len(valid_iterator)}\")\n print(\"\")" ]
[ "0.7524614", "0.7506505", "0.7341301", "0.7287756", "0.7240162", "0.71176016", "0.7104605", "0.70856947", "0.70841146", "0.6950423", "0.6944791", "0.6935007", "0.6922542", "0.6916224", "0.6903519", "0.68901885", "0.6874518", "0.68721944", "0.6839896", "0.68311983", "0.68162155", "0.6801768", "0.6787648", "0.6771482", "0.67539996", "0.6732291", "0.67137545", "0.67128164", "0.67096996", "0.6708402", "0.6689353", "0.6656891", "0.6653478", "0.66533613", "0.6625091", "0.6619542", "0.66024065", "0.6592954", "0.6591568", "0.6567929", "0.65600055", "0.6544386", "0.65357864", "0.6535094", "0.64715385", "0.646602", "0.64624393", "0.645804", "0.6423817", "0.64213043", "0.6402002", "0.6398393", "0.63897336", "0.63859314", "0.6382169", "0.6367141", "0.6354784", "0.6348812", "0.63472223", "0.6344248", "0.63398623", "0.6338326", "0.6338271", "0.63147247", "0.6293085", "0.6290323", "0.6284541", "0.62840843", "0.62840843", "0.6276243", "0.62738335", "0.6272322", "0.6263272", "0.6261321", "0.6258949", "0.6258116", "0.62501955", "0.6250125", "0.62500495", "0.62370837", "0.622974", "0.6226839", "0.6226323", "0.6223548", "0.62231356", "0.6213666", "0.6210189", "0.61964273", "0.6196259", "0.6195865", "0.6188585", "0.61878383", "0.6185594", "0.6184775", "0.6184583", "0.6184583", "0.6184583", "0.6184583", "0.6184583", "0.6177375" ]
0.79244447
0
A subfunction with a general weights initialization.
def _reset_weights(m):
    nn = import_optional_dependency("torch.nn")
    init = import_optional_dependency("torch.nn.init")
    if isinstance(m, nn.Conv1d):
        init.normal_(m.weight.data)
        if m.bias is not None:
            init.normal_(m.bias.data)
    elif isinstance(m, nn.Conv2d):
        init.xavier_normal_(m.weight.data)
        if m.bias is not None:
            init.normal_(m.bias.data)
    elif isinstance(m, nn.Conv3d):
        init.xavier_normal_(m.weight.data)
        if m.bias is not None:
            init.normal_(m.bias.data)
    elif isinstance(m, nn.ConvTranspose1d):
        init.normal_(m.weight.data)
        if m.bias is not None:
            init.normal_(m.bias.data)
    elif isinstance(m, nn.ConvTranspose2d):
        init.xavier_normal_(m.weight.data)
        if m.bias is not None:
            init.normal_(m.bias.data)
    elif isinstance(m, nn.ConvTranspose3d):
        init.xavier_normal_(m.weight.data)
        if m.bias is not None:
            init.normal_(m.bias.data)
    elif isinstance(m, nn.BatchNorm1d):
        init.normal_(m.weight.data, mean=1, std=0.02)
        init.constant_(m.bias.data, 0)
    elif isinstance(m, nn.BatchNorm2d):
        init.normal_(m.weight.data, mean=1, std=0.02)
        init.constant_(m.bias.data, 0)
    elif isinstance(m, nn.BatchNorm3d):
        init.normal_(m.weight.data, mean=1, std=0.02)
        init.constant_(m.bias.data, 0)
    elif isinstance(m, nn.Linear):
        init.xavier_normal_(m.weight.data)
        init.normal_(m.bias.data)
    elif isinstance(m, nn.LSTM):
        for param in m.parameters():
            if len(param.shape) >= 2:
                init.orthogonal_(param.data)
            else:
                init.normal_(param.data)
    elif isinstance(m, nn.LSTMCell):
        for param in m.parameters():
            if len(param.shape) >= 2:
                init.orthogonal_(param.data)
            else:
                init.normal_(param.data)
    elif isinstance(m, nn.GRU):
        for param in m.parameters():
            if len(param.shape) >= 2:
                init.orthogonal_(param.data)
            else:
                init.normal_(param.data)
    elif isinstance(m, nn.GRUCell):
        for param in m.parameters():
            if len(param.shape) >= 2:
                init.orthogonal_(param.data)
            else:
                init.normal_(param.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_weights_(self):\n raise NotImplementedError", "def init_weight(w):\n shape = w.shape\n if len(shape) == 4:\n i, o, u, v = shape\n k = np.sqrt(6 / (i * u * v + o * u * v))\n w.data.uniform_(-k, k)\n elif len(shape) == 2:\n k = np.sqrt(6 / sum(shape))\n w.data.uniform_(-k, k)\n elif len(shape) == 1:\n w.data.zero_()", "def init_weight(self):\n init_bn(self.norm0)", "def weight_init_(layer, func, weight_name=None, bias_name=None, bias_value=0.0, **kwargs):\n\n if hasattr(layer, 'weight') and layer.weight is not None:\n getattr(init, func)(**kwargs)(layer.weight)\n if weight_name is not None:\n # override weight name\n layer.weight.name = weight_name\n\n if hasattr(layer, 'bias') and layer.bias is not None:\n init.Constant(bias_value)(layer.bias)\n if bias_name is not None:\n # override bias name\n layer.bias.name = bias_name", "def init_weights(self):\n # Initialize weights\n self.apply(self._init_weights)\n # Tie weights if needed\n self.tie_weights()", "def __init__(self, sizes, afunc): \n\t\tself.num_layers = len(sizes)\n\t\tself.sizes = sizes\n\t\tself.afunc = afunc;\n\t\tself.initialize_weights_uniform()\n\t\t#self.initialize_weights_gaussian(0.1)\n\t\t#self.initialize_weights_xavier()", "def init_weights(self):\n # Initialize weights\n self.apply(self._init_weights)", "def init_weights(self):\n \n self.w = np.random.randn(self.D) / np.sqrt(self.D)", "def init_weight(self):\n init_layer(self.conv1)\n init_layer(self.conv2)\n init_bn(self.norm1)\n init_bn(self.norm2)", "def init_sg_weights(self):\n n = self.weights_shape[0] # size of current layer\n # pylint: disable=no-member\n A = np.random.randn(n, n) / np.sqrt(n)\n B = np.random.randn(10, n) / np.sqrt(n)\n C = np.random.randn(1, n) / np.sqrt(n)\n # pylint: enable=no-member\n self.sg_weights = [A, B, C]", "def init_weights(self, init_w=3e-3):\n self.mean_linear.weight.data.uniform_(-init_w, init_w)\n self.mean_linear.bias.data.uniform_(-init_w, init_w)\n self.log_std_linear.weight.data.uniform_(-init_w, init_w)\n self.log_std_linear.bias.data.uniform_(-init_w, init_w)", "def init_parameters(self):\n stdv = 1. / math.sqrt(self.weight.data.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)", "def init_weights(self):\n if self.init_cfg:\n super().init_weights()\n else:\n # Use smaller std for better stability and performance. We\n # use 0.1. 
See more details in \"ESRGAN: Enhanced Super-Resolution\n # Generative Adversarial Networks\"\n for m in [\n self.conv_first, self.conv_body, self.conv_up1,\n self.conv_up2, self.conv_hr, self.conv_last\n ]:\n default_init_weights(m, 0.1)", "def Winit(Weightinit):\n\n DicWinit = {'LecunNormal':tf.keras.initializers.lecun_normal(seed=None),\n 'LecunUniform':tf.keras.initializers.lecun_uniform(seed=None),\n 'GlorotNormal':tf.keras.initializers.GlorotNormal(seed=None),\n 'GlorotUniform':tf.keras.initializers.GlorotUniform(seed=None),\n 'HeNormal':tf.keras.initializers.he_normal(seed=None),\n 'HeUniform':tf.keras.initializers.he_uniform(seed=None)}\n return DicWinit[Weightinit]", "def _initialize_weights(self):\n pass", "def default_weight_initializer(self):\n self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]\n self.weights = [np.random.randn(y, x)/np.sqrt(x)\n for x, y in zip(self.sizes[:-1], self.sizes[1:])]", "def weights_init(mod):\n classname = mod.__class__.__name__\n if classname.find('Conv') != -1:\n mod.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n mod.weight.data.normal_(1.0, 0.02)\n mod.bias.data.fill_(0)", "def init_weights(model):\n ...", "def WeightInitializer():\n return np.random.uniform(-1, 1)", "def init_weights(self):\n self._q_neuron.h(self._weights) \n self._q_neuron.x(self._weights)", "def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):\n\n self.params = {}\n self.params['W1'] = weight_init_std * \\\n np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = weight_init_std * \\\n np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)", "def init_weights(self):\n for i in range(5):\n default_init_weights(getattr(self, f'conv{i+1}'), 0.1)", "def init_weight(self):\r\n xavier_init(self.output_proj, distribution='uniform', bias=0.)", "def init_weights(self) -> None:\n super().init_weights()\n kaiming_init(self.convs_all_levels, a=1, distribution='uniform')\n kaiming_init(self.conv_branch, a=1, distribution='uniform')\n kaiming_init(self.conv_pred, a=1, distribution='uniform')", "def weight_initialization(m: nn.Module) -> None:\n if type(m) == nn.Linear:\n # nn.init.xavier_uniform_(m.weight, np.sqrt(2.)) # gain is sqrt(2) because we use ReLU\n nn.init.kaiming_uniform_(m.weight, a=np.sqrt(5))\n # nn.init.uniform(m.weight, 0., 0.)", "def InitWeights(self):\n self.w = -1 + 2 * np.random.rand(self.num_of_inputs,)\n self.w0 = -1 + 2 * np.random.rand()", "def initialise_weights(self): \n \n def initialise_process(param):\n \n \"\"\"\n Initialises weights of a given parameter following either Xavier or Kaiming uniform or normal processes.\n \n : param (torch.Tensor):\n \n \"\"\"\n \n if self._initialisation_process == 'xavier_uniform':\n tnni.xavier_uniform_(param.data)\n elif self._initialisation_process == 'xavier_normal':\n tnni.xavier_normal_(param.data)\n elif self._initialisation_process == 'kaiming_uniform':\n tnni.kaiming_uniform_(param.data)\n elif self._initialisation_process == 'kaiming_normal':\n tnni.kaiming_normal_(param.data)\n \n if self._initialisation_process is not None:\n for m in self.modules():\n # Embedding\n if type(m) is nn.Embedding:\n tnni.normal_(self.embedding.weight)\n # RNN\n elif type(m) in [nn.GRU, nn.LSTM, nn.RNN]: \n for name, param in m.named_parameters():\n if 'weight_ih' in name:\n initialise_process(param)\n #torch.nn.init.kaiming_normal_(param.data)\n elif 'weight_hh' in name:\n 
tnni.orthogonal_(param.data)\n elif 'bias' in name:\n # Bias initialised with zero will get the bias from\n # the forget gate\n param.data.fill_(0.0)\n param.data[self._hidden_size:self.directions*self._hidden_size].fill_(1.0)\n # Attention linear layer\n elif type(m) is nn.Linear:\n for name, param in m.named_parameters():\n if 'weight' in name:\n initialise_process(param.data)\n elif 'bias' in name:\n param.data.normal_()", "def __init__(self, num_parameters=1, init=0.25):\n super(PReLU, self).__init__()\n self.num_parameters = num_parameters\n self.weight = Parameter(Tensor(num_parameters).fill_(init))", "def initialize_weights(self, shape):\n # Our function is not convex, so initialization with zero is not helpful\n return he_weights_initialization(shape)", "def my_assign_weights(context, data):\n pass", "def init_weights(self, dims):\n self.W = np.random.normal(size=dims) * 0.0001", "def init_layer(layer):\n \n if layer.weight.ndimension() == 4:\n (n_out, n_in, height, width) = layer.weight.size()\n n = n_in * height * width\n \n elif layer.weight.ndimension() == 2:\n (n_out, n) = layer.weight.size()\n\n std = math.sqrt(2. / n)\n scale = std * math.sqrt(3.)\n layer.weight.data.uniform_(-scale, scale)\n\n if layer.bias is not None:\n layer.bias.data.fill_(0.)", "def layer_weight_init(self, size):\n # TODO: make smarter init\n return np.random.uniform(size=size)", "def init_weights(self):\n for m in self.cls_convs:\n normal_init(m.conv, std=0.01)\n for m in self.reg_convs:\n normal_init(m.conv, std=0.01)\n normal_init(self.atss_reg, std=0.01)\n normal_init(self.atss_iou, std=0.01)\n bias_cls = bias_init_with_prob(0.01)\n normal_init(self.atss_cls, std=0.01, bias=bias_cls)", "def init_weights(self) -> None:\n nn.init.kaiming_normal_(self._U)\n nn.init.kaiming_normal_(self._W)\n nn.init.kaiming_normal_(self._V)\n\n nn.init.normal_(self._b)", "def init_weights(self, init_w=3e-3):\n self.l3.weight.data.uniform_(-init_w, init_w)\n self.l3.bias.data.uniform_(-init_w, init_w)", "def init_weights(self, init_w=3e-3):\n self.l3.weight.data.uniform_(-init_w, init_w)\n self.l3.bias.data.uniform_(-init_w, init_w)", "def init_weights(self, load_weights=None):\n if load_weights:\n # TODO\n pass\n else:\n # x: lower layer nodes n\n # y: current layer nodes n\n x = self.weights_shape[1]\n y = self.weights_shape[0]\n self.weights = np.random.randn(y, x) / np.sqrt(x) # pylint: disable=no-member\n self.biases = np.random.randn(y, 1) # pylint: disable=no-member", "def init_weights(n_input_layer, n_hidden_layer, n_hidden_layer_2, n_output_layer, xavier_init):\n W1, W2, W3 = None, None, None\n \n if xavier_init: # Checks if Xavier initialisation is wanted\n # Initialises weights depending on number of layers present using:\n # Normally distributed random number * square_root(1 / number of input neurons to that layer)\n if n_hidden_layer > 0:\n W1 = np.random.randn(n_hidden_layer, n_input_layer) * np.sqrt(1 / (n_input_layer))\n\n if n_hidden_layer_2 > 0:\n W2 = np.random.randn(n_hidden_layer_2, n_hidden_layer) * np.sqrt(1 / (n_hidden_layer))\n W3 = np.random.randn(n_output_layer, n_hidden_layer_2) * np.sqrt(1 / (n_hidden_layer_2))\n\n else:\n W2 = np.random.randn(n_output_layer, n_hidden_layer) * np.sqrt(1 / (n_hidden_layer))\n\n else:\n W1 = np.random.randn(n_output_layer, n_input_layer) * np.sqrt(1 / (n_input_layer))\n\n else:\n # Weights are randomly picked from a uniform distribution between 0 and 1\n # They are normalized by making sure the weights sum to 1\n # Uses different configurations depending on 
number of layers required\n if n_hidden_layer > 0:\n W1 = np.random.uniform(0,1,(n_hidden_layer, n_input_layer))\n W1 = np.divide(W1,np.matlib.repmat(np.sum(W1,1)[:,None],1,n_input_layer))\n \n if n_hidden_layer_2 > 0:\n W2=np.random.uniform(0,1,(n_hidden_layer_2,n_hidden_layer))\n W2=np.divide(W2,np.matlib.repmat(np.sum(W2,1)[:,None],1,n_hidden_layer))\n\n W3=np.random.uniform(0,1,(n_output_layer,n_hidden_layer_2))\n W3=np.divide(W3,np.matlib.repmat(np.sum(W3,1)[:,None],1,n_hidden_layer_2))\n\n else:\n W2 = np.random.uniform(0,1,(n_output_layer, n_hidden_layer))\n W2 = np.divide(W2,np.matlib.repmat(np.sum(W2,1)[:,None],1,n_hidden_layer))\n\n else:\n W1 = np.random.randn(n_output_layer, n_input_layer) * np.sqrt(1 / (n_input_layer))\n\n return W1, W2, W3", "def weights_init(m):\n if type(m) == torch.nn.Linear:\n m.weight.data.normal_(0.0, 0.02)\n m.bias.data.fill_(0)", "def __init__(self, weights, function):\n\n\t\tif(len(weights)<2):\n\t\t\traise ValueError(\"The weight vector should have at least two elements\")\n\n\t\tself.weights = np.array(weights)\n\t\tself.function = function", "def setWeightInitializer(self,weights):\n self.init_w = weights", "def __init__(self, weights:np.ndarray):\n self.w = weights.copy()", "def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False):\n if isinstance(module, nn.Linear):\n if name.startswith('head'):\n nn.init.zeros_(module.weight)\n nn.init.constant_(module.bias, head_bias)\n else:\n if flax:\n # Flax defaults\n lecun_normal_(module.weight)\n if module.bias is not None:\n nn.init.zeros_(module.bias)\n else:\n # like MLP init in vit (my original init)\n nn.init.xavier_uniform_(module.weight)\n if module.bias is not None:\n if 'mlp' in name:\n nn.init.normal_(module.bias, std=1e-6)\n else:\n nn.init.zeros_(module.bias)\n elif isinstance(module, nn.Conv2d):\n lecun_normal_(module.weight)\n if module.bias is not None:\n nn.init.zeros_(module.bias)\n elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.ones_(module.weight)\n nn.init.zeros_(module.bias)\n elif hasattr(module, 'init_weights'):\n # NOTE if a parent module contains init_weights method, it can override the init of the\n # child modules as this will be called in depth-first order.\n module.init_weights()", "def init_weights(self):\n r = np.sqrt(6.) / np.sqrt(self.fc.in_features +\n self.fc.out_features)\n self.fc.weight.data.uniform_(-r, r)\n self.fc.bias.data.fill_(0)", "def init_weights(self):\n r = np.sqrt(6.) 
/ np.sqrt(self.fc.in_features +\n self.fc.out_features)\n self.fc.weight.data.uniform_(-r, r)\n self.fc.bias.data.fill_(0)", "def init_weights(net, init_gain=0.02, net_name='network'):\n def init_func(m):\n classname = m.__class__.__name__\n if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') !=-1):\n init.normal_(m.weight.data, 0.0, init_gain)\n if hasattr(m, 'bias') and m.bias is not None:\n init.constant_(m.bias.data, 0.0)\n \n net.apply(init_func)\n print(f'initialize {net_name} with normal distribution')", "def __init__(self, weight: float = 1.0):\n\n super().__init__()\n self.weight = weight", "def init_weights(self, leveledinit: bool, kernel_size: int, bias: bool) -> None:\n if leveledinit:\n nn.init.normal_(self.conv1d.weight, std=1e-3)\n nn.init.normal_(self.conv1d.bias, std=1e-6)\n with torch.no_grad():\n self.conv1d.weight[:, 0, :] += 1.0 / kernel_size\n else:\n nn.init.xavier_uniform_(self.conv1d.weight)\n\n if self.embed in (\"pre\", \"post\"):\n nn.init.xavier_uniform_(self.embedding.weight)", "def weights_init(m):\n if (\n isinstance(m, nn.Linear)\n or isinstance(m, nn.EmbeddingBag)\n or isinstance(m, nn.Embedding)\n or isinstance(m, SparseLinear)\n ):\n nn.init.xavier_normal_(m.weight)", "def __init__(self, weights):\n self._weights = weights", "def apply_on_layer(self, layer):\n init_g = Constant(1.)\n\n try:\n weight_tag = 'W' if hasattr(layer, 'W') else 'U'\n except AttributeError:\n raise AttributeError(\"Trying to call weight norm on {} \".format(layer)+\\\n \"without layer.W or layer.U defined\")\n weights = getattr(layer, weight_tag)\n\n Wndim = weights.get_value().ndim\n if Wndim == 4:\n W_axes_to_sum = (1,2,3)\n W_dimshuffle_args = (0,'x','x','x')\n elif Wndim == 5:\n W_axes_to_sum = (1,2,3,4)\n W_dimshuffle_args = (0,'x','x','x','x')\n elif Wndim == 3 :\n raise NotImplementedError(\"What is a weight with 3 dimensions?\")\n else :\n W_axes_to_sum = 0\n W_dimshuffle_args = ('x',0)\n\n if self.train_g is not None:\n g = init_g(layer.output_dims)\n g = theano.shared(g, name=layer.prefix+'_g')\n if self.train_g :\n layer.params += [g]\n\n new_weights = weights * (\n g / T.sqrt(1e-6 + T.sum(T.square(weights),\n axis=W_axes_to_sum))).dimshuffle(*W_dimshuffle_args)\n layer.g = g\n else:\n new_weights = weights / \\\n T.sqrt(1e-6 + T.sum(T.square(weights),\n axis=W_axes_to_sum,keepdims=True))\n\n setattr(layer, weight_tag, new_weights)", "def init_weights(self):\n\n params = torch.load(self.resnet_weight)\n\n self.fc1.weight.data = params['state_dict']['module.fc.weight'].clone()\n self.fc1.bias.data = params['state_dict']['module.fc.bias'].clone()\n\n\n r = np.sqrt(1.) / np.sqrt(self.fc3.in_features +\n self.fc3.out_features)\n self.fc3.weight.data.uniform_(-r, r)\n self.fc3.bias.data.fill_(0)\n r = np.sqrt(1.) / np.sqrt(self.fc2.in_features +\n self.fc2.out_features)\n self.fc2.weight.data.uniform_(-r, r)\n self.fc2.bias.data.fill_(0)\n r = np.sqrt(1.) / np.sqrt(self.fc4.in_features +\n self.fc4.out_features)\n self.fc4.weight.data.uniform_(-r, r)\n self.fc4.bias.data.fill_(0)", "def init_weights(self) -> None:\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)", "def weights_initializer(self):\n self.weights = [np.random.normal(0, 1 / np.sqrt(x), (x, y)) for x, y in list(zip(self.structure[1:], self.structure[:-1]))]", "def init_weights(self):\r\n self.embedding.weight.data.uniform_(-0.1, 0.1)\r\n self.fc.bias.data.fill_(0)\r\n self.fc.weight.data.uniform_(-0.1, 0.1)", "def init_weights(self):\n r = np.sqrt(6.) 
/ np.sqrt(self.fc1.in_features +\n self.fc1.out_features)\n self.fc1.weight.data.uniform_(-r, r)\n self.fc1.bias.data.fill_(0)\n r = np.sqrt(6.) / np.sqrt(self.fc2.in_features +\n self.fc2.out_features)\n self.fc2.weight.data.uniform_(-r, r)\n self.fc2.bias.data.fill_(0)", "def init_weights(m):\n if isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, mean=0.1, std=0.01)\n nn.init.constant_(m.bias, 0.0)", "def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)", "def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)", "def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)", "def _init_weights(self):\n nn.init.xavier_normal_(self.out.weight)", "def _weight_initializer(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)", "def init_weights(m):\n\tif type(m) == nn.Linear:\n\t\ttorch.nn.init.xavier_normal(m.weight)\n\t\tm.bias.data.fill_(0.01)", "def _weight_initializer(self):\n for m in self.modules():\n if isinstance(m, nn.ConvTranspose2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)", "def init_weights(self, sizes, init, X, Y):\n\t\tinit = init.lower()\n\n\t\tif init == 'none':\n\t\t\tpass\n\t\telif init == 'zeros':\n\t\t\tself.wts = arr([np.zeros((sizes[i + 1],sizes[i] + 1)) for i in range(len(sizes) - 1)], dtype=object)\n\t\telif init == 'random':\n\t\t\tself.wts = arr([.0025 * np.random.randn(sizes[i+1],sizes[i]+1) for i in range(len(sizes) - 1)], dtype=object)\n\t\telse:\n\t\t\traise ValueError('NNetClassify.init_weights: ' + str(init) + ' is not a valid option for init')", "def init_weights(self):\n for m in self.cls_convs:\n normal_init(m.conv, std=0.01)\n for m in self.reg_convs:\n normal_init(m.conv, std=0.01)\n bias_cls = bias_init_with_prob(0.01)\n normal_init(self.retina_cls, std=0.01, bias=bias_cls)\n normal_init(self.retina_reg, std=0.01)", "def init_weights(self):\n\n r = np.sqrt(1.) / np.sqrt(self.fc3.in_features +\n self.fc3.out_features)\n self.fc3.weight.data.uniform_(-r, r)\n self.fc3.bias.data.fill_(0)\n r = np.sqrt(1.) / np.sqrt(self.fc2.in_features +\n self.fc2.out_features)\n self.fc2.weight.data.uniform_(-r, r)\n self.fc2.bias.data.fill_(0)\n r = np.sqrt(1.) 
/ np.sqrt(self.fc1.in_features +\n self.fc1.out_features)\n self.fc1.weight.data.uniform_(-r, r)\n self.fc1.bias.data.fill_(0)", "def init_weights(self, input_size=None, min_w=WEIGHT_MIN,\n max_w=WEIGHT_MAX):\n if input_size is None:\n input_size = self.INPUT_SIZE\n\n # Add a bias weight to each neuron\n weights_per_neuron = input_size + 1\n\n self.weights = np.random.rand(self.size, weights_per_neuron) \\\n * (max_w - min_w) + min_w", "def get_weights(self):", "def initializeWeights(n_in,n_out):\r\n \r\n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\r\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\r\n return W", "def initializeWeights(n_in,n_out):\r\n \r\n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\r\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\r\n return W", "def initWeights(self):\n self.weights = []\n self.bias = []\n for i, dim in enumerate(self.dimensions[1:]):\n self.weights.append(np.random.uniform(-1,1,(self.dimensions[i],dim)))\n self.bias.append(np.random.uniform(-1,1,dim))", "def __init__(self, weights, biases):\n super().__init__()\n\n self.weights = weights\n self.biases = biases", "def initialize_weights(root, name=\"InitializeWeights\"):\n initialize_ops = []\n\n def initialize(node):\n if isinstance(node, Weights):\n initialize_ops.append(node.initialize())\n if isinstance(node, GaussianLeaf):\n initialize_ops.extend(node.initialize())\n\n with tf.name_scope(name):\n # Get all assignment operations\n traverse_graph(root, fun=initialize, skip_params=False)\n\n # Return collective operation\n return tf.group(*initialize_ops)", "def initializeWeights(n_in,n_out):\n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\n return W", "def initializeWeights(n_in,n_out):\n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\n return W", "def initializeWeights(n_in,n_out):\n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\n return W", "def initialize_weights(self):\n tf.nest.map_structure(\n weights_initializer.WeightsInitializer.initialize_layer_or_model,\n self._layer_nest)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _WeightInit(self, stddev):\n return init_ops.truncated_normal_initializer(stddev=stddev)", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n\n self.params = {'weight': 0.0001 * np.random.randn(out_features, in_features), 'bias': np.zeros((out_features, 1))}\n self.grads = 
{'weight': np.zeros((out_features, in_features)), 'bias': np.zeros((out_features, 1))}\n\n\n\n ########################\n # END OF YOUR CODE #\n #######################", "def init_weights(self):\n with torch.no_grad():\n self._init_weights()", "def _init_weights(layer):\n if isinstance(layer, (nn.Conv2d, nn.Linear)):\n torch.nn.init.xavier_uniform_(layer.weight)\n try:\n # Some layers may not have biases, so catch the exception and pass.\n layer.bias.data.fill_(0.0)\n except AttributeError:\n pass", "def get_weights(fs, W_init=None, std_scale=0.4):\n\n if W_init == None:\n stddev = std_scale*np.sqrt(2.0 / np.prod(fs[:3]))\n W_init = torch.normal(torch.zeros(*fs), stddev)\n\n W_init = nn.Parameter(W_init) \n return W_init", "def ComputeWeights(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def build(self,input_shape):\n\n self.w = self.add_weight(shape=(input_shape[-1],self.units),\n initializer='random_normal',\n trainable=True)\n self.b = self.add_weight(shape=(self.units,),\n initializer='random_normal',\n trainable=True)", "def init_weights(self):\n self.transformer.init_weights()\n if self.loss_cls.use_sigmoid:\n bias_init = bias_init_with_prob(0.01)\n for m in self.cls_branches:\n nn.init.constant_(m[-1].bias, bias_init)", "def weight_setup(self, weighting):\n if weighting == \"overlap\":\n self.weights = overlap_generator(overlap, self.graph)\n elif weighting == \"unit\":\n self.weights = overlap_generator(unit, self.graph)\n elif weighting == \"min_norm\":\n self.weights = overlap_generator(min_norm, self.graph)\n else:\n self.weights = overlap_generator(normalized_overlap, self.graph)", "def init_weights(w_shape, layer_index, weight_initializer):\n\n return tf.Variable(weight_initializer(w_shape), name=\"weight{}\".format(layer_index))", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n self.params = {'weight': np.random.normal(loc = 0, scale=0.0001, size=(out_features,in_features)),\\\n 'bias': np.zeros((1, out_features))}\n \n self.grads = {'weight': np.zeros((out_features,in_features)),\\\n 'bias': np.zeros((1, out_features))}\n ########################\n # END OF YOUR CODE #\n #######################", "def init_weights(net, init_gain=0.02):\n def init_func(m): # define the initialization function\n classname = m.__class__.__name__\n if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n init.normal_(m.weight.data, 0.0, init_gain)\n if hasattr(m, 'bias') and m.bias is not None:\n init.constant_(m.bias.data, 0.0)\n\n print('initialize network')\n net.apply(init_func) # apply the initialization function <init_func>", "def default_weights(n):\n return np.array([1/n for _ in range(n)])", "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n xavier_uniform_(m.weight)\n if m.bias is not None:\n zeros_(m.bias)", "def _update_samples_weight(self):\n m, n = 0, self.u.shape[0]\n T = self.u.shape[1]\n N = n + T\n d_0 = matrix(self.d_0.reshape(n, 1))\n\n # Linear Inequallity Constraints, Gx <= h\n G = matrix(-1 * np.eye(N))\n h = matrix(np.zeros(shape=(N, 1)))\n\n # Linear Equality Constraints, Ax = b\n A = matrix(np.concatenate((np.ones(shape=(T, 1)), np.zeros(shape=(n, 1))), axis=0).T)\n b = matrix(1.0)\n\n def F(x=None, z=None):\n if x is None: return 0, matrix(0.5, (N, 1))\n w = x[:T, :]\n phi = x[T:, :]\n reg_inv = 1 / self.reg\n\n weighted_u = np.dot(self.u, w) # n x 1\n 
scores = -1 * reg_inv * (weighted_u + phi) # n x 1\n\n # Numeric correction\n scores -= max(scores)\n\n # Auxilliaries\n weighted_scores_exp = np.multiply(d_0, np.exp(scores))\n sum_weighted_scores_exp = np.sum(weighted_scores_exp)\n sum_weighted_scores_exp_square = sum_weighted_scores_exp ** 2\n squared_weighted_scores_exp = np.square(weighted_scores_exp)\n weighted_scores_exp_mults = np.dot(weighted_scores_exp, weighted_scores_exp.T)\n uw_mult = np.multiply(self.u, weighted_scores_exp)\n uw_mult_sum = np.sum(np.multiply(self.u, weighted_scores_exp), axis=0)\n\n f = self.reg * np.log(sum_weighted_scores_exp) + self.kappa * np.sum(phi) # f(x)\n\n dfdw = -1 * uw_mult_sum.T / sum_weighted_scores_exp\n dfdphi = (-1 * weighted_scores_exp / sum_weighted_scores_exp) + self.kappa\n Df = np.concatenate((dfdw, dfdphi), axis=0) # Gradient\n\n mf = matrix(f)\n mDf = matrix(Df.T)\n if z is None:\n return mf, mDf\n # Assumes d_0 is uniform\n H = np.zeros(shape=(N, N)) # Hessian\n dfdwiwi = np.zeros(shape=(T, 1))\n dfdphiiphij = -1 * reg_inv * (np.tril(weighted_scores_exp_mults)) / sum_weighted_scores_exp_square\n dfdphiiphii = reg_inv * (np.multiply(weighted_scores_exp,\n sum_weighted_scores_exp - weighted_scores_exp) / sum_weighted_scores_exp_square)\n # dfdwiwj, dfwiphij are zeros\n dfdphiiwj = reg_inv * ((\n uw_mult * sum_weighted_scores_exp - weighted_scores_exp * uw_mult_sum) / sum_weighted_scores_exp_square)\n\n H[T:, T:] = dfdphiiphij\n H[T:, :T] = dfdphiiwj\n H_diagonal = np.concatenate((dfdwiwi, dfdphiiphii), axis=0)\n np.fill_diagonal(H, H_diagonal)\n\n mH = matrix(z[0] * H)\n return mf, mDf, mH\n\n prev_w = self.w\n prev_slacks = self.slacks\n try:\n wphi = solvers.cp(F, G=G, h=h, A=A, b=b)['x']\n self.w = wphi[:T, :]\n self.slacks = wphi[T:, :]\n except Exception as e: # Catch rank errors and continue to next iteration\n self.slacks = prev_slacks\n self.w = prev_w\n try:\n self.w = np.concatenate((self.w, [[1 / (len(self.w) + 1)]]), axis=0)\n except:\n self.w = np.concatenate((self.w, [1 / (len(self.w) + 1)]), axis=0)\n self.w /= np.sum(self.w)\n\n scores = ((-1 / self.reg) * np.squeeze(np.asarray(np.dot(self.u, self.w) + self.slacks))) + np.log(\n self.d_0) # Update according to Equation (6)\n return self.softmax(scores)", "def __call__(self, inputs_shape):\n assert not self._achieve_init\n self.W = 2 * np.random.randn(self._units, inputs_shape) / np.sqrt(inputs_shape)\n self.b = np.zeros((self._units, 1))\n super(Dense, self).__call__()", "def _initialize_weights(self):\n stddev = 1.0 / math.sqrt(self._hidden_size)\n for layer_id, param_id in itertools.product(\n range(self._num_layers * (self._bidirectional + 1)),\n range(self._num_gates * 2)):\n i = layer_id * 2 + (param_id // self._num_gates)\n j = i + len(self._weights_shapes) // 2\n matrix_shape = self._weights_shapes[i][:]\n bias_shape = self._weights_shapes[j][:]\n matrix_shape[0] //= self._num_gates\n bias_shape[0] //= self._num_gates\n self._set_parameter(\n init_ops.random_uniform(matrix_shape, -stddev, stddev),\n layer_id, param_id, 'matrix')\n self._set_parameter(\n init_ops.random_uniform(bias_shape, -stddev, stddev),\n layer_id, param_id, 'bias')", "def test_init_default(self):\n lfd = LinearDifferentialOperator()\n weightfd = [FDataBasis(Constant(domain_range=(0, 1)), 0)]\n\n self._assert_equal_weights(\n lfd.weights, weightfd,\n \"Wrong list of weight functions of the linear operator\")", "def initialize_weights(m):\n if isinstance(m, nn.Linear) or isinstance(m, nn.ConvTranspose2d):\n init.xavier_uniform(m.weight.data)" ]
[ "0.71588784", "0.6969171", "0.672849", "0.66897625", "0.66819024", "0.6635739", "0.66334575", "0.6569541", "0.65168756", "0.65055704", "0.6471483", "0.6467132", "0.64426", "0.6441045", "0.6407897", "0.64061683", "0.64002126", "0.63850623", "0.6382718", "0.6356592", "0.6353832", "0.63295835", "0.63225406", "0.62873095", "0.6284029", "0.62829924", "0.6281933", "0.6281535", "0.62794733", "0.6254111", "0.62533635", "0.6237568", "0.6234011", "0.6219919", "0.62169784", "0.62169534", "0.62169534", "0.61968696", "0.61882186", "0.6187996", "0.6178567", "0.6168893", "0.6137369", "0.61102486", "0.61059046", "0.61059046", "0.6096129", "0.6095948", "0.6092388", "0.6087548", "0.6079585", "0.60789067", "0.606723", "0.6057393", "0.6050316", "0.6043527", "0.60184646", "0.60166395", "0.6015385", "0.6015385", "0.6015385", "0.60114527", "0.60105205", "0.5998318", "0.59959835", "0.5993548", "0.59925884", "0.59882075", "0.5981225", "0.59715885", "0.59547836", "0.59547836", "0.59434056", "0.5933586", "0.592845", "0.59266007", "0.59266007", "0.59266007", "0.59234345", "0.5923394", "0.5923394", "0.5923394", "0.5915509", "0.5914101", "0.5910362", "0.5907025", "0.59050345", "0.5904878", "0.5904111", "0.5899677", "0.5899545", "0.58991796", "0.58991355", "0.589821", "0.58933514", "0.5893299", "0.5882735", "0.5881095", "0.5878725", "0.5877537", "0.5876261" ]
0.0
-1
Show the training information.
def __repr__(self):
    # info string
    info = self.model.__repr__()
    info += "\n=========================\n"
    info += f"Train data length:\t\t{ len(self.train_dataset) }\n"
    info += f"Eval data length:\t\t{ len(self.eval_dataset) }\n"
    info += f"Optimizer:\t\t\t\t{ str(self.optimizer).split('(')[0] }\n"
    info += f"Criterion:\t\t\t\t{ str(self.criterion).split('(')[0] }\n"
    info += f"Training Environment:\t{ self.device.type }\n"
    info += f"Show information:\t\t{ 'True' if self.info else 'False' }\n"
    info += "=========================\n"
    return info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def training_info(self):\n pass", "def show(self):\n print \"Name: \"+str(self.name)\n ss = self.y.shape[0]\n for i in xrange(ss):\n print \"Actual: \"+str(self.y[i])\n print \"Prediction: \"+str(self.a[i])\n print \"\"\n print \"\\n\"", "def print_info(self):\n print(\"Num samples (train/test/val): {} tot: {}\\n\"\n \"Samples per class: {}\\n\"\n \"Sample type {}\\n\"\n \"Sample shape: {}\\n\"\n \"Label type {}\\n\"\n \"Label shape: {}\\n\"\n \"Root dirs: {}\".format([int(np.floor(frac * len(self.__labels))) for frac in self.split_fraction],\n len(self.__labels),\n self.__samples_per_class,\n self.train.output_types[0], self.train.output_shapes[0][1:],\n self.train.output_types[1], self.train.output_shapes[1][1:],\n self.__root_directory_list))", "def show_all_training():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n \n training = Training.query.all()\n\n \n return render_template(\"training_display.html\", training = training)", "def display_training():\n\n if not g.user:\n flash(\"Please Login to continue.\", \"danger\")\n return redirect(\"/\")\n\n trainings = Training.query.all()\n\n return render_template(\"users/show_training.html\", trainings = trainings)", "def format():\n train_data.info() # retreiving general info about train data\n print('Number of rows and columns of train set:', train_data.shape)\n print('dtype: ', train_data.dtypes) # analyse data type of each column\n print('First 3 rows of the train data:', train_data.head(3))", "def show_training(history: tf.keras.callbacks.History) -> None:\n hist = history.history\n\n if \"loss\" not in hist:\n print(\"Error: 'loss' values not found in the history\")\n return\n\n # plot training\n plt.figure(figsize=(14, 4))\n plt.subplot(121)\n plt.plot(hist[\"loss\"], label=\"Training\")\n if \"val_loss\" in hist:\n plt.plot(hist[\"val_loss\"], label=\"Validation\")\n plt.xlabel(\"epoch\")\n plt.ylabel(\"loss\")\n plt.legend()\n\n if \"accuracy\" in hist:\n plt.subplot(122)\n plt.plot(hist[\"accuracy\"], label=\"Training\")\n if \"val_accuracy\" in hist:\n plt.plot(hist[\"val_accuracy\"], label=\"Validation\")\n plt.xlabel(\"epoch\")\n plt.ylabel(\"accuracy\")\n plt.legend()\n\n plt.suptitle(\"Training history\")\n plt.show()\n\n # show final results\n print(\"\\nTraining loss: \\t{:.4f}\".format(hist[\"loss\"][-1]))\n if \"val_loss\" in hist:\n print(\"Validation loss: \\t{:.4f}\".format(hist[\"val_loss\"][-1]))\n if \"accuracy\" in hist:\n print(\"\\nTraining accuracy: \\t{:.3f}\".format(hist[\"accuracy\"][-1]))\n if \"val_accuracy\" in hist:\n print(\"Validation accuracy:\\t{:.3f}\".format(hist[\"val_accuracy\"][-1]))", "def learning_viz(self) :\n self.train\n history = self.history\n plot_loss(history)", "def print_info(self):\n\n print \"\\nALGORITHM INFO\"\n print \"modelnumber:\", self.modelnumber\n print \"restart:\", self.restart\n print \"particles:\", self.particles\n print \"beta:\", self.beta\n print \"dt:\", self.dt\n if self.mode != 1:\n if len(self.final_epsilon) == 0:\n print \"manual epsilon:\"\n for i in range(self.epsilon.shape[0]):\n print \"\\t\",\n for j in range(self.epsilon.shape[1]):\n print \"\", self.epsilon[i, j],\n print \"\"\n else:\n print \"auto epsilon:\"\n print \"\\t\", self.final_epsilon\n print \"\\talpha:\", self.alpha\n\n print \"kernel:\", self.kernel\n print \"model kernel:\", self.modelkernel\n print \"model prior:\", self.modelprior\n\n print 
\"DATA:\"\n print \"\\ttimes:\", self.times\n if self.mode == 0:\n print \"\\tvars:\"\n for i in range(len(self.data[0, :])):\n print \"\\t\",\n for j in range(self.ntimes):\n print \"\", self.data[j, i],\n print \"\"\n\n print \"MODELS:\", self.nmodels\n for i in range(self.nmodels):\n print \"\\t\", \"npar:\", self.nparameters[i]\n print \"\\t\", \"nspecies:\", self.nspecies[i]\n print \"\\t\", \"name:\", self.name[i]\n print \"\\t\", \"source:\", self.source[i]\n print \"\\t\", \"type:\", self.type[i]\n print \"\\t\", \"fit:\", self.fit[i]\n print \"\\t\", \"init:\", self.x0prior[i]\n print \"\\t\", \"prior:\", self.prior[i]\n print \"\\t\", \"logp:\", self.logp[i]\n print \"\\n\"", "def info(self):\n now = datetime.datetime.now().strftime(\"%Y-%m-%d-%HH-%MM-%SS\")\n print(f\"Exploration info ({now})\")\n print(f\"HDF name: {self.HDF_FILE}\")\n print(f\"Trajectory name: {self.trajectoryName}\")\n if self.model is not None:\n print(f\"Model: {self.model.name}\")\n if hasattr(self, \"nRuns\"):\n print(f\"Number of runs {self.nRuns}\")\n print(f\"Explored parameters: {self.exploreParameters.keys()}\")\n if hasattr(self, \"_t_end_exploration\") and hasattr(self, \"_t_start_exploration\"):\n print(f\"Duration of exploration: {self._t_end_exploration-self._t_start_exploration}\")", "def summary(self):\n\n print(\n \"\\nModel trained with dataset %s that has maxlen=%d and charset=%s for %d epochs.\"\n % (self.dataset_name, self.maxlen, self.charset, self.epochs)\n )\n\n print(\n \"noise_std: %.6f, lstm_dim: %d, dec_layers: %d, td_dense_dim: %d, batch_size: %d, codelayer_dim: %d, lr: %.6f.\"\n % (\n self.noise_std,\n self.lstm_dim,\n self.dec_layers,\n self.td_dense_dim,\n self.batch_size,\n self.codelayer_dim,\n self.lr,\n )\n )", "def train():\n role = get_role()\n sets = get_sets()\n return render_template(\"train.html\", sets=sets, role=role)", "def see_evaluation(epoch, training_acc, test_acc):\n print (\"Epoch \", epoch, \"Training acc: \", training_acc*100, \"Test acc: \", test_acc*100)", "def _display_examples(self):\n\n print(self._usage)\n print(self._examples)", "def print_statistics(self) -> None:\n e = self.current_epoch\n if len(self.loss_history[\"test_loss\"]) > 0:\n template = 'Epoch: {} Training loss: {:.4f}, Test loss: {:.4f}'\n print(template.format(e, self.loss_history[\"training_loss\"][-1],\n self.loss_history[\"test_loss\"][-1]))\n else:\n template = 'Epoch: {} Training loss: {:.4f}'\n print(template.format(e, self.loss_history[\"training_loss\"][-1]))", "def train(self):\n return", "def print_brief_summary(self):\n print (\"Model {}\".format(self.modelName))\n print (\"Precision {}\".format(self.precision))\n print (\"Recall {}\".format(self.recall))\n print (\"f1 score {}\".format(self.f1))\n \n # work here\n print (\"\\nGold NER label counts:\")\n for ner in self.gold_cts.keys():\n print (\"{} : {} (tag{})\".format(self.gold_cts[ner], self.nerTags.ids_to_words([ner]), ner))\n print (\"\\nPredicted NER label counts:\")\n for ner in self.pred_cts.keys():\n print (\"{} : {} (tag{})\".format(self.pred_cts[ner], self.nerTags.ids_to_words([ner]), ner))", "def print_configuration_info():\n print(\"Selected dataset:\", DATASET) \n print(\"Dataset base directory:\", BASE_INPUT_DIR) \n print(\"Daytime option:\", DAYTIME) \n print(\"Nones option:\", NONES) \n print(\"Selected action/activity representation:\", OP)\n print(\"Number of epochs: \", EPOCHS)\n print(\"Number of folds for cross-validation: \", FOLDS)\n print(\"Input directory for data files:\", INPUT_DIR) \n 
print(\"Embedding matrix file:\", EMBEDDING_WEIGHTS)\n print(\"Action sequences (X) file:\", X_FILE) \n print(\"Word embedding file for activities:\", ACTIVITY_EMBEDDINGS) \n print(\"Activity to int mappings:\", ACTIVITY_TO_INT)\n print(\"Int to activity mappings:\", INT_TO_ACTIVITY) \n print(\"Experiment ID:\", EXPERIMENT_ID)\n print(\"Treat imbalance data:\", TREAT_IMBALANCE)\n print(\"Save intermediate plots:\", SAVE)\n print(\"Batch size:\", BATCH_SIZE)\n print(\"Dropout:\", DROPOUT)\n print(\"Loss:\", LOSS)", "def show(self):\n\n pass", "def show(self):\n print(\"APKs in Session: {}\".format(len(self.analyzed_apk)))\n for d, a in self.analyzed_apk.items():\n print(\"\\t{}: {}\".format(d, a))\n print(\"DEXs in Session: {}\".format(len(self.analyzed_dex)))\n for d, dex in self.analyzed_dex.items():\n print(\"\\t{}: {}\".format(d, dex))\n print(\"Analysis in Session: {}\".format(len(self.analyzed_vms)))\n for d, a in self.analyzed_vms.items():\n print(\"\\t{}: {}\".format(d, a))", "def info(self):\n self.update_info()\n print('Number of electrodes: ' + str(self.n_elecs))\n print('Recording time in seconds: ' + str(self.dur))\n print('Sample Rate in Hz: '+ str(self.sample_rate))\n print('Number of sessions: ' + str(self.n_sessions))\n print('Date created: ' + str(self.date_created))\n print('Meta data: ' + str(self.meta))", "def training(self):\n self.training = True", "def print_info(self):\n print(\"Experiment key: \" + self.key)\n print(\"Experiment name: \" + self.name)\n print(\"Experiment path: \" + self.output_path)\n print(\"Auto-sync activated: \" + str(self.auto_sync))\n print(\"\")\n print(\"Experiment metadata: \")\n print(self.exp_metadata.to_str())", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def main(self):\r\n\r\n #Train the GEN and DISC\r\n self.modelTrain.main()\r\n self.disp.show()", "def start_training(self):\n self.training()\n \n images, true_labels, pred_labels, pred_probs = self.evaluate_model(proba=True)\n \n metrics = Metrics(images, true_labels, pred_labels, pred_probs, self.classes)\n\n cm = metrics.get_confusion_matrix()\n print('The confusion matrix is:\\n', cm)\n print('*'*100)\n \n cr = metrics.get_classification_report()\n print('The classification report is:\\n', cr)\n print('*'*100)", "def print_status(self, epoch, iteration, prefix=\"\",\n mode=\"train\", is_main_net=True):\n if mode == \"train\":\n log = getattr(self.logger[\"train\"], \"info\")\n else:\n log = getattr(self.logger[\"train\"], \"debug\")\n\n if is_main_net:\n # prepare txt to print\n jump = 3\n txt = \"epoch {} step {} \".format(epoch, iteration)\n for i, (k,v) in enumerate(self.status.items()):\n if (i+1) % jump == 0:\n txt += \", {} = {:.3f}\".format(k, v)\n log(txt)\n txt = \"\"\n elif (i+1) % jump == 1:\n txt += \"{} = {:.3f}\".format(k, v)\n else:\n txt += \", {} = {:.3f}\".format(k, v)\n\n txt += \" ({}->{}/{})\".format(\"|\".join(pred for pred in self.basenet_pred),\n utils.label2string(self.itoa, self.top1_predictions[0]), self.top1_gt)\n\n # print learning information\n log(txt)", "def print_network(self):\n #plot_model(self.model, to_file='model.png', show_shapes=True)\n logging.info(\"\")\n logging.info(self.network)\n logging.info(\"Network accuracy: %.2f%%\" % (self.accuracy * 100))\n logging.info(\"Network loss: %.2f%%\" % (self.loss))", "def show(self,verbose=0):\n print 'inferenceArgs',self.ws.inferenceArgs\n print 'inferenceExpr',theano.pp(self.ws.inferenceExpr)\n if 
verbose>=1:\n print 'debugprint inferenceExpr:'\n theano.printing.debugprint(self.ws.inferenceExpr)\n if self.ws.dataLossExpr:\n print 'dataLossArgs',self.ws.dataLossArgs\n print 'dataLossExpr',theano.pp(self.ws.dataLossExpr)\n print 'debugprint dataLossExpr:'\n theano.printing.debugprint(self.ws.dataLossExpr)", "def print_summary(self):\n self.model.summary()", "def show(self):\n pass", "def summary(self):\r\n print(self.model.summary())", "def train(self):\n self.training = True", "def info(self):\n mallet = c['mallet']\n env = set_env_lang_utf8()\n info_bin = os.path.join(os.path.join(mallet, 'bin'), 'classifier2info')\n info_p = sub.Popen([info_bin, '--classifier', self._model],\n stdout=sub.PIPE, stdin=sub.PIPE, stderr=sub.PIPE, env=env)\n\n cur_class = None\n feats = TwoLevelCountDict()\n\n # Go through and pick out what the features are for\n for line in info_p.stdout:\n content = line.decode(encoding='utf-8')\n\n class_change = re.search('FEATURES FOR CLASS (.*)', content)\n # Set the current class if the section changes\n if class_change:\n cur_class = class_change.group(1).strip()\n continue\n\n # Otherwise, let's catalog the features.\n word, prob = content.split()\n feats.add(cur_class, word, float(prob))\n\n # Now, print some info\n for cur_class in feats.keys():\n print(cur_class, end='\\t')\n print('%s:%.4f' % ('<default>', feats[cur_class]['<default>']), end='\\t')\n top_10 = feats.top_n(cur_class, n=10, key2_re='^nom')\n print('\\t'.join(['%s:%.4f' % (w,p) for w,p in top_10]))", "def print_info(self):\r\n self.system.print_to_log(\r\n f\"{self.__class__.__name__} model: Infection probability: {self.p}, Infectious period: {self.i}, Recovery period: {self.r}.\")", "def summary(self):\n print(self.model.summary())", "def display(self):\n print(\"{}, {}\".format(self.label, self.params))", "def test_train(self):\n print \"x=\",self.trainer.train()", "def summary(self):\n\n print(\"input label:\", self.__input_label)\n print(\"target label:\", self.__target_label)\n print(\"denoising label:\", self.denoising_label)\n print(\"contains a successful DE:\", self.is_successful())", "def print_layer_trainable(model_name):\n\n print('trainable : layer name')\n print('- '*30)\n for layer in model_name.layers:\n # if layer.trainable:\n print(\"{0}:\\t{1}\".format(layer.trainable, layer.name))\n \n return", "def display(config, transfo, learner, *args):\n\n stderr.write(\"Config is %s\\n\" % str(config))\n stderr.write(\"Transfo is %s\\n\" % str(ktpipes.KtPipe.from_json(config[transfo])))\n stderr.write(\"Learner is %s\\n\" % str(learner))", "def print_classification_info(clf, x, y):\n x_tr, x_ts, y_tr, y_ts = train_test_split(x, y, train_size=0.8, test_size=0.2)\n clf.fit(x_tr, y_tr)\n p = clf.predict(x_ts)\n print(classification_report(y_ts, p))\n print(confusion_matrix(y_ts, p))", "def debug(self):\n \n #path\n print('Path information:')\n for k, v in self.__path.items():\n print(k, v)\n \n #sample count\n print('Sample statistic of each phase')\n for k, v in self.__phase_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each class')\n for k, v in self.__area_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each train')\n for k, v in self.__train_sample_count.items():\n print(k, v)", "def show(self):\n raise NotImplementedError", "def show(self):\n raise NotImplementedError", "def info(self):\n for key, value in self.dataset['info'].items():\n print('{}: {}'.format(key, value))", "def show_research(self):\n self.list_research = 
orm_imp.read_research()\n print(\"========================================================\")\n for row in self.list_research:\n print(\n row[\"date\"], \"|| Produit :\", row['subcat'],\n \"|| Meilleure proposition :\", row['product'], \"| Score :\",\n row['nutriscore'], \"| Lien :\", row['url'],\n \"| Ingrédients :\", row['ingredient'])\n print(\"========================================================\")", "def training(request):\n context = {\n\n }\n template = loader.get_template('training.html')\n return HttpResponse(template.render(context, request))", "def show_training_history(self):\n hist = [i.history[\"loss\"][0] for i in self.history]\n plt.plot(hist)", "def show(self) -> None:", "def printParameters(self):\n print(\"----------Model Parameters----------\")\n print(\"Initial Conv. Depth : \" + str(self.conv_depth))\n print(\"Number of Classes : \" + str(self.n_classes))\n print(\"Dropout : \" + str(self.dropout))\n print(\"Activation Function : Relu\")\n print(\"Input Shape : \" + str(self.input_shape))\n print(\"Batch Size : \" + str(self.batch_size))\n print(\"--------Optimizer Parameters--------\")\n print(\"Learning Rate : \" + str(self.optimizer.lr))\n print(\"Momentum : \" + str(self.optimizer.momentum))\n print(\"Initial Decay : \" + str(self.optimizer.initial_decay))", "def train():\n pass", "def print_data_info(my_data, src_field, trg_field):\n train_data = my_data[\"train\"]\n valid_data = my_data[\"val\"]\n test_data = my_data[\"test\"]\n\n print(\"Data set sizes (number of sentence pairs):\")\n print('train', len(train_data))\n print('valid', len(valid_data))\n print('test', len(test_data), \"\\n\")\n\n print(\"First training example:\")\n print(\"src:\", \" \".join(vars(train_data[0])['src']))\n print(\"trg:\", \" \".join(vars(train_data[0])['trg']), \"\\n\")\n\n print(\"Most common words (src):\")\n print(\"\\n\".join([\"%10s %10d\" % x for x in src_field.vocab.freqs.most_common(10)]), \"\\n\")\n print(\"Most common words (trg):\")\n print(\"\\n\".join([\"%10s %10d\" % x for x in trg_field.vocab.freqs.most_common(10)]), \"\\n\")\n\n print(\"First 10 words (src):\")\n print(\"\\n\".join(\n '%02d %s' % (i, t) for i, t in enumerate(src_field.vocab.itos[:10])), \"\\n\")\n print(\"First 10 words (trg):\")\n print(\"\\n\".join(\n '%02d %s' % (i, t) for i, t in enumerate(trg_field.vocab.itos[:10])), \"\\n\")\n\n print(\"Number of NL words (types):\", len(src_field.vocab))\n print(\"Number of AMR words (types):\", len(trg_field.vocab), \"\\n\")", "def show(self):\n print(\"depth: \", self.depth, \"split_id: \", self.split_id,\n \"split_val: \", self.split_val, \"predict_label: \",\n self.predict_label)\n if self.left_node is not None:\n self.left_node.show()\n if self.right_node is not None:\n self.right_node.show()", "def show_info(self):\n print(\"Problem number: \" + str(self.number))\n print(\"Problem name: \" + str(self.name))\n print(\"Problem description: \" + str(self.desc))", "def displayInfo(self):\n # clear stdout for a smoother display\n # os.system('cls' if os.name=='nt' else 'clear')\n\n #print(\"=========== Status ============\")\n # print(\n # \"speed: \" + str(self.speed) +\n # \"\\nangle: \" + str(self.steering_angle) +\n # \"\\nsign: \" + str(self.detected_sign) +\n # \"\\nlane lines: \" + str(self.lane_lines) +\n # \"\\nintersection line flag: \" + str(self.intersection_line) +\n # \"\\ncurrent state label: \" + str(self.currentStateLabel) +\n # \"\\ncurrent states: \" + str(self.currentState)\n #)", "def print_summary(self):\n 
self.network.print_summary()", "def show_learning_stats(track, train_loss, train_acc, valid_acc, test_acc):\n\n if track[\"valid\"] and track[\"test\"]:\n print(\"Train loss: {:.4f} -- Train acc: {:.4f} -- Val acc: {:.4f} -- Test acc: {:.4f}\".format(\n train_loss, train_acc, valid_acc, test_acc))\n\n if track[\"valid\"] and not track[\"test\"]:\n print(\"Train loss: {:.4f} -- Train acc: {:.4f} -- Val acc: {:.4f}\".format(\n train_loss, train_acc, valid_acc))\n\n if not track[\"valid\"] and track[\"test\"]:\n print(\"Train loss: {:.4f} -- Train acc: {:.4f} -- Test acc: {:.4f}\".format(\n train_loss, train_acc, test_acc))\n\n if not track[\"valid\"] and not track[\"test\"]:\n print(\"Train loss: {:.4f} -- Train acc: {:.4f} \".format(\n train_loss, train_acc))", "def show(self, options=None):\n\n # # IMPLEMENTATION NOTE: Stub for implementing options:\n # if options and self.InspectOptions.ALL_OUTPUT_LABELS in options:\n # pass\n\n print (\"\\n---------------------------------------------------------\")\n print (\"\\n{}\\n\".format(self.name))\n\n print (\"\\tLearning enabled: {}\".format(self._learning_enabled))\n\n # print (\"\\n\\tMechanisms:\")\n # for mech_name in self.mechanismNames:\n # print (\"\\t\\t{}\".format(mech_name))\n\n print (\"\\n\\tMechanisms:\")\n for mech_tuple in self._mech_tuples:\n print (\"\\t\\t{} (phase: {})\".format(mech_tuple.mechanism.name, mech_tuple.phase))\n\n\n print (\"\\n\\tOrigin mechanism: \".format(self.name))\n for mech_tuple in self.originMechanisms.mech_tuples_sorted:\n print(\"\\t\\t{} (phase: {})\".format(mech_tuple.mechanism.name, mech_tuple.phase))\n\n print (\"\\n\\tTerminal mechanism: \".format(self.name))\n for mech_tuple in self.terminalMechanisms.mech_tuples_sorted:\n print(\"\\t\\t{} (phase: {})\".format(mech_tuple.mechanism.name, mech_tuple.phase))\n for output_state_name in mech_tuple.mechanism.outputStates:\n print(\"\\t\\t\\t{0}\".format(output_state_name))\n\n print (\"\\n---------------------------------------------------------\")", "def training_summary(history, model, train_generator, eval_generator):\n nrows, ncols = 2, 3\n fig, axes = plt.subplots(nrows, ncols, figsize=(20, 12))\n fig.suptitle(\"Training Summary\")\n axes = np.ravel(axes)\n\n keys = history.history.keys()\n print(keys)\n\n axes[0].plot(history.history[\"loss\"], label=\"training loss\", c=\"blue\")\n axes[0].plot(history.history[\"val_loss\"], label=\"validation loss\", c=\"green\")\n axes[0].set_xlabel(\"epoch\")\n axes[0].set_ylabel(\"loss\")\n axes[0].legend(loc=\"best\")\n\n axes[1].plot(history.history[\"acc\"], label=\"training acc\", c=\"blue\")\n axes[1].plot(history.history[\"val_acc\"], label=\"validation acc\", c=\"green\")\n axes[1].set_xlabel(\"epoch\")\n axes[1].set_ylabel(\"loss\")\n axes[1].legend(loc=\"best\")\n\n eval_model(model, train_generator, eval_generator, axes[2:])\n plt.show()", "def visualize_training(features, labels, pl):\n print(\"Visualizing training\")\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n # Take out each feature type, one at a time\n label_map = get_label_map(labels)\n\n for key in label_map.keys():\n like_ind = label_map[key]\n like_data = np.array([features[i] for i in like_ind])\n\n plt.scatter(like_data[:,0],like_data[:,1],label=key)\n\n # get limits\n xmin = features.column_min(0) - .5\n xmax = features.column_max(0) + .5\n ymin = features.column_min(1) - .5\n ymax = features.column_max(1) + .5\n\n plt.xlim(xmin,xmax)\n plt.ylim(ymin,ymax)\n\n # Track the current dividing line, as well as the number of epochs 
passed\n divider, = plt.plot([],[])\n epoch_tracker = plt.text(-1,.9, '', fontsize=15)\n\n def update(i):\n \"\"\"\n 1.) Get the next set of weights from the tracker\n 2.) Calculate and draw the new divider line\n 3.) Update the epoch counter\n 4.) If we are at the end of an epoch, plot a dashed divider line to track progress\n \"\"\"\n epoch = i//features.instance_count\n w = pl.weights_tracker[i]\n a = pl.accuracy_tracker[epoch]\n divider.set_data([xmin,xmax],[(-xmin * w[0] - w[2]) / w[1], (-xmax * w[0] - w[2]) / w[1]])\n epoch_tracker.set_text(\"{} {}\".format(epoch + 1, a))\n\n # Keep a shadow of the hyperplane at the end of each epoch\n if i % features.instance_count == 0:\n plot_hyperplane(w,xmin,xmax,iter = i, alpha = .3, color='black',linestyle='dashed')\n\n return divider\n\n ani = animation.FuncAnimation(fig, update, frames=range(len(pl.weights_tracker)), interval=250,repeat=False)\n plt.legend()\n\n # optional save file\n if len(sys.argv) >= 3 :\n ani.save(sys.argv[2], writer='imagemagick', fps=5)\n\n plt.show()", "def show_info(self):\n print 'Querying the station for the configuration...'\n config = self.station.getConfig()\n for key in sorted(config):\n print '%s: %s' % (key, config[key])", "def main_training():\n if request.method == 'GET':\n print(\"Working directory: \", path_creator())\n train_knn_model_params=[config_gettype('train_knn_model','FRS.ini',param) for param in inspect.getfullargspec(train_knn_model)[0]]\n train_knn_model(*train_knn_model_params)\n return_text=\"FRS_training_model.py completed\"\n return jsonify(return_text)\n else:\n return_text1 = \"Опа\"\n return jsonify(return_text1)", "def print_all_features(self):\n if self.DEBUG:\n print('weights')\n print('-------------------------')\n print('w_EDR: ', self.w_EDR)\n print('w_Resource', self.w_RESOURCE)\n print('w_Distance', self.w_DISTANCE)\n print(' ')\n print('Features')\n print('-------------------------')\n print('Agent locations at time step:', self.t, ' are ', self.agent_locations)\n print('Agents that are idle at time step:', self.t, ' are ', self.is_agent_idle)\n print('Tasks that are alive at time step:', self.t, ' are ', self.is_task_alive)\n print('Tasks that are enabled at time step:', self.t, ' are ', self.is_task_enabled)\n print('Tasks that are travel_enabled at time step:', self.t, ' are ', self.travel_time_constraint_satisfied)\n print('Tasks that are in progress at time step:', self.t, ' are ', self.is_task_in_progress)\n print('Tasks that are finished at time step:', self.t, ' are ', self.is_task_finished)\n\n print(\"agent1 is currently at location \", self.get_vectorized_location(self.agents[0].getz()), ' and is working on ',\n self.agents[0].curr_task)\n print(\"agent2 is currently at location \", self.get_vectorized_location(self.agents[1].getz()), ' and is working on ',\n self.agents[1].curr_task)", "def status(self):\n print 'Currently there are ', len(self.classifiers_list), ' classifiers. 
They are'\n classifiers_coefficients = np.zeros(self.current_working_memory.shape)\n print [classifier_i.label for classifier_i in self.classifiers_list]\n for count, classifier_i in enumerate(self.classifiers_list):\n coeffs_i = classifier_i.classifier.coef_ \\\n if classifier_i.classifier_type == 'standard' else np.zeros([1, 1])\n classifiers_coefficients[count, :coeffs_i.shape[1]] = coeffs_i\n # print 'Classifier: ', classifier_i\n # print 'Classifier name: ', classifier_i.label\n # print 'Out address', classifier_i.out_address\n # print 'In address', classifier_i.end_in_address\n # print 'Coefficients: ', classifier_i.classifier.coef_, classifier_i.classifier.intercept_\n plt.imshow(self.current_working_memory, interpolation='none', cmap='gray')\n plt.title('Current working memory')\n plt.figure()\n plt.imshow(classifiers_coefficients, interpolation='none', cmap='gray')\n plt.title('Classifier coefficients')\n plt.show()", "def get_test_case_info():\n m = NNMatrixTrainer()\n return m.get_evaluations()", "def main():\n df_titanic = pd.read_csv('train.csv', header=None)\n print df_titanic.describe()", "def describe_training_job(TrainingJobName=None):\n pass", "def print_details(self):\n print(\"[{}]\".format(self.name))\n print(\"ID: \" + str(self.id))\n print(\"name: %s\" % self.name)\n print(\"URL: %s\" % self.url)\n print(\"CPUs: \" + str(self.cpus) + \" cores\")\n print(\"Mem: \" + self.memory_str)\n print(\"Tasks: \" + str(self.tasks_len))\n print(\"Uptime %s\" + self.uptime)\n print(\"Uptime Descriptive %s\" + self.uptime_descriptive)\n print(\" \")", "def showcase():\n\tfrom PIL import Image\n\tfrom PIL import ImageFont\n\tfrom PIL import ImageDraw\n\n\t# Optional: Varied loading process for showcases, when not done at the end of training\n\t# directory = \"results/dirname\"\n\t# checkpoint_path = directory + \"/50000.pth\"\n\t# checkpoint = torch.load(checkpoint_path)\n\t# epoch = checkpoint['epoch']\n\t\"\"\"\n\tfrom collections import OrderedDict\n\tnew_state_dict = OrderedDict()\n\tfor k, v in checkpoint['state_dict'].items():\n\t\t# name = k[7:] # remove `module.`\n\t\tname = k.replace(\".module\", \"\") # removing ‘.moldule’ from key\n\t\tnew_state_dict[name] = v\n\t# load params\n\tmodel.load_state_dict(new_state_dict)\n\n\toptimizer.load_state_dict(checkpoint['optimizer'])\n\tprint(\"Loaded checkpoint '{}' (epoch {})\".format(checkpoint_path, checkpoint['epoch']))\n\t\"\"\"\n\tos.makedirs(directory + \"/showcase\", exist_ok=True)\n\n\tglobal decoder_nat_loss, decoder_syn_loss, KLD_syn_loss, KLD_nat_loss, regressor_nat, regressor_syn\n\n\tactual_showcase(False, False)\n\treset_loss_sums()\n\tactual_showcase(True, False)\n\treset_loss_sums()\n\tactual_showcase(False, True)\n\treset_loss_sums()\n\tactual_showcase(True, True)", "def show(self, notebook=notebook_display):\n print(\"\\nCluster Ensemble:\")\n if notebook is True:\n display(self._df)\n elif notebook is False:\n print(self._df)\n self.massrich_parameters()", "def show(self):\n\t\traise NotImplementedError()", "def visualise(self):\n\n scores, education = self.get_data()\n self.write_data(scores, education)\n\n return True", "def visualize(self):\n # TODO\n #pyLDAvis.enable_notebook()\n #vis = pyLDAvis.gensim.prepare(self.lda_model, self.stemmed_corpus)\n return", "def log_training_results(engine: Engine):\n train_evaluator.run(self.train_dl)\n metrics: Dict[str, float] = train_evaluator.state.metrics\n avg_accuracy: float = metrics['accuracy']\n avg_bce: float = metrics['bce']\n pbar.log_message(\n f'Training 
Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.4f} Avg loss: {avg_bce:.4f}')", "def test_training(self):\n\t\tpass", "def print_summary(self):\n print(\"Word Level\")\n self.model_word.summary()\n \n print(\"Sent Level\")\n self.model_sent.summary()\n\n print(\"Doc Level\")\n self.model.summary()", "def example():\n accShape, accTexture, accFinal = train_and_eval_all_models()\n print(f'Accuracy -- only shape: {accShape}; only texture: {accTexture}; combined: {accFinal}.')", "def start_training(self):\n self.training = True", "def describe(self):\n print(\"Number of nodes: {0}\".format(self.nnodes))\n print(\"Number of interfaces: {0}\".format(self.ninterfaces))\n print(\"Number of elements: {0}\".format(self.nelements))", "def is_training(self):\n return self.mode == \"train\"", "def is_training(self):\n return self.mode == \"train\"", "def plot_training_info(case, metrics, save, history):\n val = False\n if 'val_accuracy' in history and 'val_loss' in history:\n val = True\n plt.ioff()\n if 'accuracy' in metrics:\n fig = plt.figure()\n plt.plot(history['accuracy'])\n if val:\n plt.plot(history['val_accuracy'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n if val:\n plt.legend(['train', 'val'], loc='upper left')\n else:\n plt.legend(['train'], loc='upper left')\n if save:\n plt.savefig(case + 'accuracy.png')\n plt.gcf().clear()\n else:\n plt.show()\n plt.close(fig)\n\n # summarize history for loss\n if 'loss' in metrics:\n fig = plt.figure()\n plt.plot(history['loss'])\n if val:\n plt.plot(history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n # plt.ylim(1e-3, 1e-2)\n plt.yscale(\"log\")\n if val:\n plt.legend(['train', 'val'], loc='upper left')\n else:\n plt.legend(['train'], loc='upper left')\n if save:\n plt.savefig(case + 'loss.png')\n plt.gcf().clear()\n else:\n plt.show()\n plt.close(fig)", "def summary(self):\r\n self.base.summary()\r\n self.extra_layers.summary()\r\n self.detector.summary()", "def batch_info():\n return BatchInfo(\"Applitools Demo Visual Tests\")", "def info(self):\n import string\n results = self.info_list()\n labels = \"%-8s %-9s %-4s %-8s %-8s %-4s\" % \\\n ('MACHINE','CPU','GHZ','MB TOTAL',\n 'MB FREE','LOAD')\n print labels\n for i in range(len(self.workers)):\n name = string.split(self.workers[i].host,'.')[0]\n res = results[i]\n s = \"%-8s %2dx%-6s %4.1f %8.1f %8.1f %4.2f\" % \\\n (name[-8:], res['cpu_count'],res['cpu_type'][-6:], \\\n res['cpu_speed'],res['mem_total'],res['mem_free'],\\\n res['load_1'])\n print s", "def show(self):", "def wypisz_info(self):\n print(f\"Samochód: {self.producent} {self.model}\")", "def show_info(self):\n txt = \"Brand: %s\\nModel: %s\\nHostname: %s\\n\"%(self.brand, self.model, self.hostname)\n return txt", "def generate_train_string(self, train_log: dict, step: Union[int,None] = None) -> str:\n print_str = 'Epoch {}, '.format(step) if step else ''\n for k,v in train_log.items():\n print_str = print_str + '{} : {:.4f}, '.format(k, v)\n \n return print_str[:-1]", "def _show_info(self):\n\n dataframe = self._cache.get_source(config.DATAFRAME_ARTISTS)\n dataframe.printSchema()", "def print_training_state(self,epoch,error,finished=False):\n\n #print(\"Epoch:\",iterCount)\n if finished:\n print(\"Network has reached a state of minimum error.\")\n #print(\"Error: {0}\\tEpoch {1}\".format(error,iterCount))\n print(\"\"\"Epoch {0} completed\"\"\".format(epoch),'Error:',error)", "def model_summary():\n print(\"\\n\")\n print(\"=\" * 30 + \"Model 
Structure\" + \"=\" * 30)\n model_vars = tf.trainable_variables()\n slim.model_analyzer.analyze_vars(model_vars, print_info=True)\n print(\"=\" * 60 + \"\\n\")", "def summary(self):\n self.model.summary()", "def show_model_summary(self):\n\t\treturn self.model.summary()", "def print_info(self):\n\n n_metabolites = len(self.metabolites)\n n_reactions = len(self.reactions)\n n_constraints = len(self.constraints)\n n_variables = len(self.variables)\n\n info = pd.DataFrame(columns=['value'])\n info.loc['name'] = self.name\n info.loc['description'] = self.description\n info.loc['num constraints'] = n_constraints\n info.loc['num variables'] = n_variables\n info.loc['num metabolites'] = n_metabolites\n info.loc['num reactions'] = n_reactions\n info.index.name = 'key'\n\n print(info)" ]
[ "0.8102564", "0.71761745", "0.6958835", "0.6510571", "0.65085834", "0.6437893", "0.6429031", "0.6424601", "0.64181775", "0.6403707", "0.6398665", "0.6397122", "0.63890433", "0.6379363", "0.6367299", "0.6364796", "0.6359014", "0.63058555", "0.63040435", "0.6300371", "0.6295759", "0.6288267", "0.628584", "0.62725085", "0.62725085", "0.62725085", "0.62725085", "0.62725085", "0.6265262", "0.6256336", "0.6253152", "0.6246902", "0.62312883", "0.6231067", "0.62276196", "0.62093216", "0.6207989", "0.6202114", "0.6187828", "0.6178222", "0.6171897", "0.6158527", "0.61565524", "0.61369693", "0.6111885", "0.6088584", "0.60780525", "0.6072308", "0.6072308", "0.6062101", "0.6055876", "0.6052086", "0.60452753", "0.603936", "0.6038765", "0.6037842", "0.60374755", "0.6030386", "0.6026509", "0.6020897", "0.6013016", "0.6005856", "0.6002257", "0.6000376", "0.5999894", "0.59981894", "0.59967756", "0.59942216", "0.5993004", "0.59889925", "0.59866744", "0.59848565", "0.59793234", "0.59769565", "0.597272", "0.5958584", "0.5958483", "0.59436786", "0.59406465", "0.5935559", "0.59340745", "0.59330934", "0.5922114", "0.5912318", "0.5911435", "0.5911435", "0.59080464", "0.59052634", "0.59022444", "0.5896556", "0.5891252", "0.58870375", "0.5882994", "0.5881267", "0.588062", "0.58659226", "0.5847114", "0.5841596", "0.5840199", "0.58352643" ]
0.6441708
5
An auto-training method for quick use. The epoch count and the model save location must be specified when calling auto_train().
def auto_train(self, epoch: int, save_model_location: str, eval: bool = True, eval_interval: int = 1,
               info: bool = True, save_static_dicts: bool = True):
    # initialization
    self.info = info
    best_attempt = float("-inf")
    self._set_train()
    # clean the cache if cuda is available
    self._clean_cache()
    # start training
    for n in range(epoch):
        self.iter_epoch()
        if self.eval_dataset and eval and not (n + 1) % eval_interval:
            # eval start
            eval_loss, accuracy = self.eval()
            # save the best model
            if accuracy > best_attempt:
                best_attempt = accuracy
                if save_static_dicts:
                    self.save_state_dict(save_model_location)
                else:
                    self.save_model(save_model_location)
                print(f"The best model is saved to { self._set_save_location(save_model_location) }. "
                      f"Best accuracy: { best_attempt }")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.num_epochs):\n print(\"EPOHA\")\n self.run_epoch()\n print(\"SAVE MODEL\")\n self.save_model()", "def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()", "def train_one_epoch(self):\n raise NotImplementedError", "def train(self, training_steps=10):", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def train(self):\n # setup model\n self.createModel()\n self.setGenerators()\n self.buildCallbacks()\n self.printParameters()\n \n # train model\n _ = self.model.fit_generator(\n generator = self.trainGen,\n validation_data = self.validateGen,\n steps_per_epoch = self.steps_per_epoch,\n validation_steps = self.validation_steps,\n epochs = self.epochs,\n use_multiprocessing = True,\n callbacks = self.callbacks)\n # clear save paths to avoid overwriting accidentaly\n self.saveName = None", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, model, tqdm_data,\n optimizer_disc=None, optimizer_gen=None):", "def train():\n pass", "def go_train(sources, targets, model, dictloc, max_epochs):\n\n\ttrain.trainer(targets, sources, model, \n\t\tsaveto=\"data/trainer.npz\", \n\t\tdictionary=dictloc, \n\t\tmax_epochs=max_epochs, \n\t\tsaveFreq=100, \n\t\treload_=os.path.isfile(\"data/trainer.npz\")\n\t)", "def train_model(train_generator, validation_generator):\n # we build a test generator to benchmark the model on unseen data\n test_datagen = ImageDataGenerator(rescale=1. 
/ 255)\n\n test_generator = test_datagen.flow_from_directory(\n test_path,\n target_size=(200, 200),\n color_mode=\"rgb\",\n shuffle=True,\n class_mode='sparse',\n batch_size=batch_size)\n model = build_model()\n filepath = join(save_path, weights_path)\n checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', save_best_only=True, mode='max')\n early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=epochs // 5, verbose=1, restore_best_weights=True)\n log_dir = join(home, save_path, 'logs', 'fit_smart', datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)\n callbacks_list = [early_stopping, checkpoint, tensorboard_callback]\n # origin [sessions] models each [epochs] times\n max_acc = 0.0\n for i in range(sessions):\n # model training and evaluation\n history = model.fit(\n train_generator,\n steps_per_epoch=train_generator.samples // batch_size,\n epochs=epochs,\n validation_data=validation_generator,\n validation_steps=validation_generator.samples // batch_size\n , verbose=2, callbacks=callbacks_list, workers=multiprocessing.cpu_count(),\n use_multiprocessing=False)\n model.load_weights(join(save_path, weights_path))\n test_loss, test_acc = model.evaluate(test_generator, steps=len(test_generator))\n # save model if it performed better\n if test_acc > max_acc:\n max_acc = test_acc\n model.save(join(home, save_path, model_name))\n print(\"accuracy: \", test_acc, \"\\n Loss:\", test_loss)", "def train_model(self, *args, **kwargs):\n raise NotImplementedError", "def training(self):\n self.model.fit(self.train_x, self.train_y)", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def train_model(self):\n self.logger.info('Loading the data...')\n train_data = self.load_data(split=\"train\")\n dev_data = self.load_data(split=\"dev\")\n self.config.best_model = os.path.join(self.config.output_dir,\"best_model\")\n self.logger.info('Training the model, outputdir=%s...,best_model=%s' % (self.config.output_dir,self.config.best_model))\n\n train_params = {\n \"overwrite_output_dir\" : True,\n \"reprocess_input_data\": True,\n \"learning_rate\" : self.config.learning_rate,\n \"num_train_epochs\" : self.config.num_train_epochs,\n \"train_batch_size\" : self.config.train_batch_size,\n \"eval_batch_size\" : self.config.eval_batch_size,\n \"gradient_accumulation_steps\": self.config.gradient_accumulation_steps,\n \"use_early_stopping\" : self.config.early_stopping,\n \"fp16\" : False,\n \"classification_report\" : True,\n \"evaluate_during_training\" : True,\n \"evaluate_during_training_verbose\" : True,\n \"best_model_dir\": self.config.best_model,\n \"save_model_every_epoch\" : self.config.save_model_every_epoch,\n \"save_steps\" : self.config.save_steps,\n \"save_optimizer_and_scheduler\" : self.config.save_optimizer_and_scheduler,\n \"save_best_model\": True,\n }\n\n ## train the model \n self.model.train_model(\n train_data,\n eval_data=dev_data,\n output_dir=self.config.output_dir,\n show_running_loss=False,\n args=train_params,\n )\n\n ## backing up the config and create pointer to best model \n with open(os.path.join(self.config.best_model,\"trainer_config.json\"),'w') as mconfig:\n mconfig.write(json.dumps(self.config.__dict__))\n self.config.existing_model = self.config.best_model", "def train():\n # YOUR TRAINING CODE GOES HERE", "def train(self):\n self.log(f\"{self.cur_file_path}\\t\\tInfo: train method invoked!\")\n self.log(f\"{self.cur_file_path}\\t\\tInfo: 
training {self.model.__class__.__name__} model!\")\n\n self.model.fit(self.trainX, self.trainY)", "def _train_model(self):\n raise NotImplementedError()", "def train(self,\n epochs=10,\n track_every=20):\n self.model.train()\n print(\"Model put in training mode.\")\n\n for i in range(epochs):\n stop_training = False\n batch_losses = []\n for j, sample in enumerate(self.training_set):\n\n # Run single loop.\n loss = self.partial_fit(sample)\n batch_losses.append(loss)\n self.print_progress(epoch=i,\n batch=j,\n loss=loss)\n\n if j % track_every == 0 and j != 0:\n batch_loss = numpy.mean(numpy.array(batch_losses))\n val_loss, metric = self.update_validation_result(epoch=i,\n batch=j,\n loss=batch_loss)\n\n stop_training = self.estopper.check_stop_training(val_loss)\n\n if stop_training:\n break\n\n # End batch iteration.\n\n val_loss, metric = self.update_validation_result(epoch=i,\n batch=j,\n loss=batch_loss)\n\n if stop_training:\n print(\"Early stopping.\")\n torch.save(self.model, self.save_dir + \"model.pt\")\n print(f\"Model saved to {self.save_dir}model.pt\")\n break\n\n # End training loop.", "def _train_model(self):\n self.experiment = EpisodicExperiment(self.task, self.agent)\n n_epochs = int(self.rl_params.n_training_episodes / self.rl_params.n_episodes_per_epoch)\n logger.debug(\"Fitting user model over {} epochs, each {} episodes, total {} episodes.\"\n .format(n_epochs, self.rl_params.n_episodes_per_epoch, n_epochs*self.rl_params.n_episodes_per_epoch))\n for i in range(n_epochs):\n logger.debug(\"RL epoch {}\".format(i))\n self.experiment.doEpisodes(self.rl_params.n_episodes_per_epoch)\n self.agent.learn()\n self.agent.reset() # reset buffers", "def train_network(self):\n if self.trainData:\n if self.verbose:\n print('Started training...')\n\n for epoch in range(135):\n pass\n # save the model\n else:\n if self.verbose:\n print('No train data available')", "def train_model(net, model_name, max_epochs=50, patience=7, batch_size=256, overwrite=False):\n file_exists = os.path.isfile(_get_model_file(CHECKPOINT_PATH, model_name))\n if file_exists and not overwrite:\n print(\"Model file already exists. 
Skipping training...\")\n else:\n if file_exists:\n print(\"Model file exists, but will be overwritten...\")\n\n # Defining optimizer, loss and data loader\n optimizer = optim.SGD(net.parameters(), lr=1e-2, momentum=0.9) # Default parameters, feel free to change\n loss_module = nn.CrossEntropyLoss()\n train_loader_local = data.DataLoader(\n train_set, batch_size=batch_size, shuffle=True, drop_last=True, pin_memory=True\n )\n\n val_scores = []\n best_val_epoch = -1\n for epoch in range(max_epochs):\n ############\n # Training #\n ############\n net.train()\n true_preds, count = 0.0, 0\n for imgs, labels in tqdm(train_loader_local, desc=f\"Epoch {epoch+1}\", leave=False):\n imgs, labels = imgs.to(device), labels.to(device) # To GPU\n optimizer.zero_grad() # Zero-grad can be placed anywhere before \"loss.backward()\"\n preds = net(imgs)\n loss = loss_module(preds, labels)\n loss.backward()\n optimizer.step()\n # Record statistics during training\n true_preds += (preds.argmax(dim=-1) == labels).sum()\n count += labels.shape[0]\n train_acc = true_preds / count\n\n ##############\n # Validation #\n ##############\n val_acc = test_model(net, val_loader)\n val_scores.append(val_acc)\n print(\n f\"[Epoch {epoch+1:2i}] Training accuracy: {train_acc*100.0:05.2f}%, Validation accuracy: {val_acc*100.0:05.2f}%\"\n )\n\n if len(val_scores) == 1 or val_acc > val_scores[best_val_epoch]:\n print(\"\\t (New best performance, saving model...)\")\n save_model(net, CHECKPOINT_PATH, model_name)\n best_val_epoch = epoch\n elif best_val_epoch <= epoch - patience:\n print(f\"Early stopping due to no improvement over the last {patience} epochs\")\n break\n\n # Plot a curve of the validation accuracy\n plt.plot([i for i in range(1, len(val_scores) + 1)], val_scores)\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Validation accuracy\")\n plt.title(f\"Validation performance of {model_name}\")\n plt.show()\n plt.close()\n\n load_model(CHECKPOINT_PATH, model_name, net=net)\n test_acc = test_model(net, test_loader)\n print((f\" Test accuracy: {test_acc*100.0:4.2f}% \").center(50, \"=\") + \"\\n\")\n return test_acc", "def train_model(self, *args, **kwargs):\n self.model.train(self.training, *args, **kwargs)", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(model_name, batch_size, steps_per_epoch, epochs, validation_steps, \n model_file=None, save_path=None):\n \n print(\"- Loading configuration...\")\n if model_name in models_default_params:\n default_params = models_default_params[model_name]\n else:\n print(\"Error: the model '{}' has not been implemented\".format(model_name))\n return\n custom_objects = default_params['custom_objects']\n patch_size = default_params['patch_size']\n if save_path is None:\n save_path = default_params['default_path']\n if os.path.isfile(save_path):\n print(\"Warning: {} is an existing file and will be overwritten.\".format(save_path))\n print(\"- Configuration loaded.\")\n \n print(\"- Loading datasets...\")\n train_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Training/RGB/\",\n y_directory = \"datasets/Potsdam/Training/Labels/\",\n patch_size = patch_size)\n val_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Validation/RGB/\",\n y_directory = \"datasets/Potsdam/Validation/Labels/\",\n patch_size = patch_size)\n print(\"- Data loaded.\")\n \n print(\"- Initialising model...\")\n if(model_file is not None): # Further train existing model\n 
model = keras.models.load_model(model_file, custom_objects=custom_objects)\n else: # Create new model\n if model_name == 'fcn':\n model = fcn.make_fcn_resnet((patch_size, patch_size, channels), nb_labels, \n use_pretraining=False, freeze_base=False)\n elif model_name == 'pspnet':\n model = pspnet.build_pspnet(nb_classes=nb_labels, resnet_layers=50,\n input_shape=patch_size)\n elif model_name == 'mobilenetv2':\n model = mobilenetv2.MobileNetv2((patch_size, patch_size, channels), nb_labels) \n\n model.compile(\n optimizer = optimizers.Adam(lr = 0.00001),\n loss = losses.categorical_crossentropy,\n metrics = [metrics.categorical_accuracy]) \n model.summary() \n print(\"- Model initialised.\")\n \n tensorboard = callbacks.TensorBoard(log_dir='./logs')\n csv_logger = callbacks.CSVLogger('logs/training.csv')\n checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,\n save_weights_only=True,\n save_best_only=True)\n \n print(\"- Starting training.\")\n model.fit_generator(\n generator=train_gen,\n steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n validation_data=val_gen,\n validation_steps=validation_steps,\n # callbacks=[checkpoint, csv_logger]\n )\n print(\"- Training complete.\")\n \n model.save(save_path)\n print(\"- Model saved to {}\".format(save_path))", "def train_model(clf, dataloaders, criterion, optimizer, num_epochs=25,\n patience=10, save_model_path=None, resume=False, finetune=False):\n\n since = time.time()\n\n val_acc_history = []\n\n #best_model_wts = copy.deepcopy(clf.state_dict())\n best_acc = 0.0\n best_epoch = 0\n epoch = 0\n\n if resume:\n assert save_model_path is not None\n if save_model_path in glob(save_model_path):\n _model, _criterion, _optimizer, _epoch, _loss, _accuracy, _history = _resume_from_checkpoint(save_model_path)\n #if finetune:\n # model.set_requires_grad(_model, True)\n clf = _model\n criterion = _criterion\n optimizer = _optimizer\n epoch = _epoch + 1\n best_epoch = _epoch\n best_acc = _accuracy\n val_acc_history = _history\n else:\n raise Exception(\"No such model file in the specified path.\")\n\n if finetune:\n model.set_requires_grad(clf, True)\n\n best_model_wts = copy.deepcopy(clf.state_dict())\n test_dataloader = dataloaders.pop('test', None)\n\n clf = clf.to(device)\n\n for epoch in range(epoch, num_epochs):\n print('Epoch {}/{}'.format(epoch + 1, num_epochs))\n print('-' * 10)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n clf.train() # Set model to training mode\n else:\n clf.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n\n # Iterate over data.\n for inputs, labels in tqdm(dataloaders[phase]):\n inputs = inputs.to(device)\n labels = labels.to(device).long()\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n # Get model outputs and calculate loss\n outputs = clf(inputs)\n loss = criterion(outputs, labels)\n\n _, preds = torch.max(outputs, 1)\n\n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n # statistics\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n\n epoch_loss = running_loss / len(dataloaders[phase].dataset)\n epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)\n\n print('{} Loss: {:.4f}, Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))\n\n # deep copy the model\n if phase == 'val':\n 
val_acc_history.append(epoch_acc)\n if phase == 'val' and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(clf.state_dict())\n best_epoch = epoch\n if save_model_path:\n _save_checkpoint(clf, criterion, optimizer, epoch, epoch_loss, best_acc, val_acc_history, save_model_path)\n print(\"Model checkpoint saved successfully in the given path!\")\n print()\n if patience is not None:\n if epoch - best_epoch >= patience:\n break\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n print('Best val Acc: {:4f}'.format(best_acc))\n\n # load best model weights\n clf.load_state_dict(best_model_wts)\n\n test_acc = test_model(clf, test_dataloader, criterion, optimizer)\n if not save_model_path:\n if IN_COLAB:\n save_model_path = \"/content/drive/My Drive/Audio-classification-using-multiple-attention-mechanism/best_weights.h5\"\n else:\n save_model_path = \"best_weights.h5\"\n save_model(clf, os.path.splitext(save_model_path)[0] + (\"_final_finetuned\" if finetune else \"_final\") + os.path.splitext(save_model_path)[1])\n\n return clf, val_acc_history, test_acc", "def train(self):\n self.dataGenerator.printDataStatistics()\n sE = len(self.dataGenerator.ids[\"train\"])// 32\n sV = len(self.dataGenerator.ids[\"validation\"])// 32\n self.model.fit_generator(\n generator=self.dataGenerator.trainingGenerator,\n steps_per_epoch= sE,\n epochs=2,\n validation_data=self.dataGenerator.validationGenerator,\n validation_steps=sV,\n # use_multiprocessing=True,\n # workers=2,\n )", "def train(self, batch_training=False):\n raise NotImplementedError", "def train_epoch(self) -> None:\n ct = self.config.training\n total_games = self._get_total_games()\n print(f\"Total Games: {total_games}\")\n train_size = int(0.9 * total_games)\n dataset_wrapper = DatasetWrapper(self.config)\n self.agent.model.fit(\n dataset_wrapper.get_dataset(train_size),\n epochs=ct.epoch_to_checkpoint,\n validation_data=dataset_wrapper.get_dataset(train_size, is_training=False),\n )", "def train(self):\n self.training = True", "def fit(self):\n for i in range(self.current_epoch, self.max_epoch):\n self.current_epoch += 1\n # train\n train_dataloader = self.data_module.get_train_dataloader(\n batch_size=self.train_batch_size, \n shuffle=self.train_dataloader_shuffle, \n num_workers=self.dataloader_num_workers,\n pin_memory=True\n )\n neptune.log_metric(\"learning_rate_vs_epoch\", self.optimizer.param_groups[0]['lr'])\n self.train_one_epoch(train_dataloader)\n\n # validate \n if self.validate_after == 'epoch' and self.train_on_all_data == False and self.run_lr_range_test == False:\n validation_dataloader = self.data_module.get_valid_dataloader(\n batch_size=self.valid_batch_size, \n shuffle=self.train_dataloader_shuffle, \n num_workers=self.dataloader_num_workers, \n pin_memory=True\n )\n self.validate_one_epoch(validation_dataloader)\n\n if self.scheduler:\n if self.step_scheduler_after == 'epoch': \n if self.step_scheduler_metric == 'val_auc':\n self.scheduler.step(self.metrics['valid'][-1]['auc_score'])\n else:\n self.scheduler.step()\n\n if self.run_lr_range_test:\n neptune.log_metric('validation_epoch_end_AUC_vs_LR', \n self.scheduler.get_last_lr()[0], y=self.metrics['valid'][-1]['auc_score'])\n\n # checkpoint model for resuming model\n if (self.current_epoch % self.checkpoint_epochs) == 0:\n self.save_checkpoint()\n\n # sleep the training process\n if self.current_epoch % self.sleep_in_epochs == 0:\n print(f\"SLEEPING FOR 
{self.sleep_time} at epoch={self.current_epoch}\")\n for i in range(int(self.sleep_time/30)):\n time.sleep(i)\n neptune.log_metric(\"sleeping_status\", y=1)\n\n stop_training = self.stopping_criteria()\n if stop_training:\n if self.fp16:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n self.optimizer.zero_grad()\n else:\n self.optimizer.step()\n self.optimizer.zero_grad()\n # backward all the accumulate gradients\n print(f\"stopped training at {self.current_epoch} epoch\")\n break", "def set_train(self):\n self.model.train()", "def train_full_model(X,y_train):\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(X)\n\n #chose model\n # model = RandomForestClassifier()\n model = GradientBoostingClassifier()\n\n\n #train\n print(\"train model\")\n tic = time.time()\n model.fit(x_train,y_train)\n tac = time.time()\n print(\"elapsed time\", tac-tic)\n\n #save data\n save_data(model,scaler,x_train,y_train)", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def training(self):\n \n best_valid_loss = np.inf\n c = 0\n \n self.train_loader, self.test_loader = self.get_train_test_loaders()\n \n print('Training the {} model with the following architecture:'.format(self.model_name))\n print(summary(self.model, (3, self.image_width, self.image_height)))\n print('*'*100)\n print('Starting the training...')\n print('*'*100)\n \n # Create the model save dir if it already doesn't exist\n if not os.path.exists(self.model_save_dir):\n os.makedirs(self.model_save_dir)\n \n for epoch in range(self.n_epochs):\n\n print(f'Epoch: {epoch+1:02}')\n\n start_time = time.time()\n\n train_loss = self.train(self.train_loader)\n valid_loss = self.evaluate(self.test_loader)\n\n epoch_mins, epoch_secs = self.epoch_time(start_time, time.time())\n\n c+=1\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(self.model.state_dict(), os.path.join(self.model_save_dir, '{}_trained.pt'.format(self.model_name)))\n c=0\n\n if c>4:\n #decrease lr if loss does not decrease after 5 steps\n self.scheduler.step()\n c=0\n\n print(f'Time: {epoch_mins}m {epoch_secs}s') \n print(f'Train Loss: {train_loss:.3f}')\n print(f'Val Loss: {valid_loss:.3f}')\n print('-'*60)\n print('The best validation loss is', best_valid_loss)\n print('*'*100)", "def start_training(self):\n i = 0\n for _ in range(self.train_steps):\n print(f\"Start Training Step {i + 1}\")\n self.model.learn(total_timesteps=self.total_time_steps)\n self.model.save(self.save_path)\n print(f\"Finished Training Step {i + 1}\")\n i += 1", "def start_training(self):\n self.training = True", "def train(self):\n\n # Ensure everything is sent to GPU if being trained on the cloud\n if self.local == False:\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n print(\"\\n \\n EVERYTHING TO CUDA \\n \\n\")\n\n # Load weights if applicable\n if self.load_weights == True:\n start_epoch, loss = self.load_checkpoint(\n self.model, self.optimizer, self.model_name\n )\n start_epoch += 1\n print(\"\\n \\n [WEIGHTS LOADED]\")\n else:\n start_epoch = 0\n\n # Start Training Loop\n for epoch in range(start_epoch, self.epochs + 1):\n\n # TRAIN\n if epoch > 0:\n\n # Set model to training mode\n self.model.train()\n\n # Initialize loss and counter that will allow model weights to be saved and overwritten every 10 minibatches\n train_loss = 0\n counter = 0\n\n # Iterate through train set\n for (\n image1,\n image2,\n annotation1,\n annotation2,\n landmark1,\n landmark2,\n ) in 
tqdm(self.train_loader, desc=\"Train Epoch \" + str(epoch)):\n\n # image tensors and bounding box and label tensors to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss from one pass and append to training loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n train_loss += loss.item()\n\n # Clear optimizer gradient\n self.optimizer.zero_grad()\n\n # Backprop\n loss.backward()\n\n # Take a step with optimizer\n self.optimizer.step()\n\n # Save/overwrite model weights every 10 minibatches\n if counter % 10 == 0:\n self.save_checkpoint(\n self.model,\n self.optimizer,\n self.model_name,\n epoch,\n train_loss,\n )\n\n print(\n f\"====> Epoch: {epoch} Average train loss: {train_loss / len(self.train_loader.dataset):.4f}\\n\"\n )\n\n # Save entire model as .pt after every epoch of training\n if self.local == True:\n torch.save(\n self.model,\n os.path.join(self.save_path, self.model_name + \".pt\"),\n )\n else:\n torch.save(\n self.model, self.model_name + \"_epoch\" + str(epoch) + \".pt\"\n )\n print(\"SAVED MODEL EPOCH \" + str(epoch))\n\n # Evaluate on Validation Set after each epoch\n with torch.no_grad():\n\n # Set model to evaluation mode\n self.model.eval()\n\n # Iterate through validation set\n for image1, image2, annotation1, annotation2 in tqdm(\n self.val_loader, desc=\"Validation Epoch \" + str(epoch)\n ):\n\n # Initialize validation loss\n val_loss = 0\n\n # Send images to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss and append to validation loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n val_loss += loss\n\n print(\n f\"====> Epoch: {epoch} Average test loss: {val_loss / len(self.val_loader.dataset):.4f}\\n\"\n )\n\n print(\"[DONE EPOCH{}]\".format(epoch))\n\n print(\"[DONE TRAINING]\")\n\n # Save model after all epochs are finished\n if self.local == True:\n torch.save(\n self.model, os.path.join(self.save_path, self.model_name + \".pt\")\n )\n else:\n torch.save(self.model, self.model_name + \".pt\")", "def train(\n self,\n epochs: int = 10,\n train_steps: int = None,\n train_batch_size: int = 32,\n val_batch_size: int = 32,\n classifier_loss: str = \"focal\",\n lr: float = 1e-3,\n gamma: float = 2.0,\n cache: bool = False,\n ):\n\n # compile the model object\n if not self.model_path:\n self.compile(\n classifier_loss=classifier_loss,\n lr=lr,\n gamma=gamma,\n train_batch_size=train_batch_size,\n )\n\n # Common validation dataset format across all training regimes\n val_generator = DataGenerator(\n batch_size=val_batch_size,\n split=\"val\",\n layers=self.n_blocks,\n cache=cache,\n train_mode=\"classifier\",\n )\n if self.train_mode in [\"both\", \"pretrain\"]:\n # prepare the generator\n train_generator = DataGenerator(\n batch_size=train_batch_size,\n split=\"train\",\n augment=True,\n shuffle=True,\n cache=cache,\n train_mode=\"pretrain\",\n )\n # number of trainig steps per epoch\n train_steps = len(train_generator) if train_steps is None else train_steps\n self.model.fit(\n train_generator(),\n initial_epoch=self.epoch,\n epochs=epochs,\n workers=8,\n verbose=2,\n steps_per_epoch=train_steps,\n )\n\n # Save the trained autoencoder model\n os.makedirs(\"./ae_model\", exist_ok=True)\n self.model.save(\"ae_model/ae_model.h5\")\n\n if 
self.train_mode == \"both\":\n self.epoch = 0\n\n if self.train_mode in [\"both\", \"classifier\"]:\n # Directory for saving the trained model\n os.makedirs(\"./class_model\", exist_ok=True)\n\n # prepare the generators for classifier training\n train_generator_classifier = DataGenerator(\n batch_size=train_batch_size,\n split=\"train\",\n layers=self.n_blocks,\n augment=True,\n contrastive=True,\n cache=cache,\n shuffle=True,\n train_mode=\"classifier\",\n )\n\n # number of trainig steps per epoch\n train_steps = (\n len(train_generator_classifier) if train_steps is None else train_steps\n )\n\n # if self.train_mode == \"both\":\n # # Use feature representations learnt from AutoEncoder training\n # self.encoder_model.trainable = True\n\n self.classifier.fit(\n train_generator_classifier(),\n initial_epoch=self.epoch,\n epochs=epochs,\n workers=8,\n verbose=2,\n steps_per_epoch=train_steps,\n callbacks=self.callbacks(val_generator),\n )\n\n if self.train_mode == \"combined\":\n # Directory for saving the trained model\n os.makedirs(\"./com_model\", exist_ok=True)\n\n # prepare the generators for classifier training\n train_generator = DataGenerator(\n batch_size=train_batch_size,\n split=\"train\",\n layers=self.n_blocks,\n augment=True,\n contrastive=True,\n shuffle=True,\n cache=cache,\n train_mode=\"combined\",\n )\n\n # number of trainig steps per epoch\n train_steps = len(train_generator) if train_steps is None else train_steps\n\n self.combined.fit(\n train_generator(),\n initial_epoch=self.epoch,\n epochs=epochs,\n workers=8,\n verbose=2,\n steps_per_epoch=train_steps,\n callbacks=self.callbacks(val_generator),\n )\n # os.makedirs(\"./com_model\", exist_ok=True)\n # self.combined.save(\"com_model/com_model.h5\")", "def train_model(self,model):\r\n \r\n train_state = {'stop_early': False,\r\n 'early_stopping_step': 0,\r\n 'early_stopping_best_val': 1e8,\r\n 'learning_rate': self.lr,\r\n 'epoch_index': 0,\r\n 'train_loss': [],\r\n 'val_loss': [],\r\n 'best_model':model}\r\n \r\n dataset = self.dataset\r\n loss_fn = self.loss_fn\r\n \r\n dataset.set_split('train')\r\n print(\"Training module with \"+str(len(dataset))+\" examples\")\r\n \r\n data_loader = DataLoader(dataset,batch_size=self.batch_size,shuffle=True,\r\n drop_last=True)\r\n \r\n optimizer = optim.Adam(model.parameters(), lr=self.lr)\r\n \r\n for epoch in range(self.epochs):\r\n train_state['epoch_index'] = epoch\r\n #First step in each epoch is to train over all batches\r\n model.train()\r\n dataset.set_split('train')\r\n train_loss = 0\r\n for b_i,batch_data in enumerate(data_loader):\r\n #Step 1: zero gradients\r\n optimizer.zero_grad()\r\n #Step 2: run forward\r\n X = batch_data['x']\r\n output = model(X)\r\n #Step 3: compute loss\r\n target = batch_data['y']\r\n loss = loss_fn(output,target)\r\n #Step 4: run backward\r\n loss.backward()\r\n #Step 5: update\r\n optimizer.step()\r\n \r\n #Record accumulated loss\r\n new_loss = loss.item()\r\n train_loss += new_loss\r\n \r\n train_loss /= b_i\r\n train_state['train_loss'].append(train_loss)\r\n \r\n #After training, compute loss on validation set and check for early stop\r\n model.eval()\r\n dataset.set_split('val')\r\n val_loss = 0\r\n for b_i,batch_data in enumerate(data_loader):\r\n #Step 1: run forward\r\n X = batch_data['x']\r\n output = model(X)\r\n #Step 2: compute loss\r\n target = batch_data['y']\r\n loss = loss_fn(output,target)\r\n \r\n #Record accumulated loss\r\n new_loss = loss.item()\r\n val_loss += new_loss\r\n \r\n val_loss /= b_i\r\n 
train_state['val_loss'].append(val_loss)\r\n \r\n print(\"Finished epoch \"+str(epoch+1)+\". Train loss=\"+\\\r\n str(train_loss)+\", Val loss=\"+str(val_loss))\r\n \r\n if val_loss < train_state['early_stopping_best_val']:\r\n #new best model, reset stopping counter, store model\r\n train_state['early_stopping_step'] = 0\r\n train_state['early_stopping_best_val'] = val_loss\r\n best_model = copy.deepcopy(model)\r\n best_model.load_state_dict(model.state_dict())\r\n train_state['best_model'] = best_model\r\n else:\r\n #val loss not improved; increase early stopping counter\r\n train_state['early_stopping_step'] += 1\r\n if train_state['early_stopping_step'] >= self.early_stopping_criteria:\r\n train_state['stop_early'] = True\r\n print(\"Val loss failed to improve. Stopping early.\")\r\n break\r\n \r\n return train_state['best_model'],train_state", "def train(self, session, train_examples, dev_examples, train_dir):\n\n # some free code to print out number of parameters in your model\n # it's always good to check!\n # you will also want to save your model parameters in train_dir\n # so that you can use your trained model to make predictions, or\n # even continue training\n\n tic = time.time()\n params = tf.trainable_variables()\n num_params = sum(map(lambda t: np.prod(tf.shape(t.value()).eval()), params))\n toc = time.time()\n logging.info(\"Number of params: %d (retreival took %f secs)\" % (num_params, toc - tic))\n\n if self.summary_flag:\n self.train_writer = tf.summary.FileWriter(self.summaries_dir + '/train', session.graph)\n\n logging.info(\"Train Loss File: {}\".format(self.train_loss_log))\n logging.info(\"Dev Loss File: {}\".format(self.dev_loss_log))\n best_score = 100000\n train_log = open(self.train_loss_log, \"w\")\n dev_log = open(self.dev_loss_log, \"w\")\n for epoch in range(self.n_epoch):\n print(\"Epoch {:} out of {:}\".format(epoch + 1, self.n_epoch))\n dev_score = self.run_epoch(session, train_examples, dev_examples, epoch, train_log)\n dev_log.write(\"{},{}\\n\".format(epoch + 1, dev_score))\n logging.info(\"Average Dev Cost: {}\".format(dev_score))\n logging.info(\"train F1 & EM\")\n f1, em = self.evaluate_answer(session, train_examples, self.rev_vocab, log = True)\n logging.info(\"Dev F1 & EM\")\n f1, em = self.evaluate_answer(session, dev_examples, self.rev_vocab, log = True)\n if dev_score < best_score:\n best_score = dev_score\n print(\"New best dev score! 
Saving model in {}\".format(train_dir + \"/\" + self.model_name))\n self.saver.save(session, train_dir + \"/\" + self.model_name)\n\n return best_score", "def train_model(model, X_train, X_valid, Y_train, Y_valid):\n\n # Let's CreateCheckPoint so as to save our best models and model Logs after every epoch.\n # This checkPoint function will be called as callback functions after every epoch.\n\n checkpoint = ModelCheckpoint(\n 'model-{epoch:03d}.h5', monitor='val_loss', verbose=0, save_best_only=True, mode='auto')\n\n tensorboard = TensorBoard(log_dir=\"log\\{}\".format(time())) \n\n model.compile(loss='mean_squared_error', optimizer=Adam(lr=LEARNING_RATE))\n\n model.fit_generator(batch_generator(DATA_DIR, X_train, Y_train, BATCH_SIZE, True),\n SAMPLES_PER_EPOCH,\n NO_OF_EPOCHS,\n max_q_size=1,\n validation_data=batch_generator(\n DATA_DIR, X_valid, Y_valid, BATCH_SIZE, False),\n nb_val_samples=len(X_valid),\n callbacks=[checkpoint, tensorboard],\n verbose=1\n )", "def _set_train(self):\n\n if not self.model.__dict__['training']:\n self.model.train()", "def train(self):\n for epoch in range(self.current_epoch, self.config.optim.epochs):\n self.current_epoch = epoch\n self.train_one_epoch()\n if epoch % self.config.optim.val_freq == 0:\n self.validate()\n if self.config.optim.auto_schedule:\n self.scheduler.step(self.current_val_loss)\n self.save_checkpoint()", "def train(self):\n raise NotImplementedError", "def train(self):\r\n\r\n for cur_epoch in range(self.model.cur_epoch_tensor.eval(self.sess), self.config.num_epochs + 1, 1):\r\n self.train_epoch(cur_epoch)\r\n self.model.global_step_assign_op.eval(session=self.sess, feed_dict={\r\n self.model.global_step_input: self.model.global_step_tensor.eval(self.sess) + 1})", "def train(self):\n # Change directory to the code directory\n current_working_directory = os.getcwd()\n\n os.chdir(self.model_parameters[\"NN_code_directory\"])\n\n self.call_training_routine()\n\n # Come back to the original directory\n os.chdir(current_working_directory)", "def train_wrapper(model):\n if FLAGS.pretrained_model:\n model.load(FLAGS.pretrained_model)\n # load data\n train_input_handle, test_input_handle = datasets_factory.data_provider(\n FLAGS.dataset_name,\n FLAGS.train_data_paths,\n FLAGS.valid_data_paths,\n FLAGS.batch_size * FLAGS.n_gpu,\n FLAGS.img_width,\n seq_length=FLAGS.total_length,\n is_training=True)\n\n eta = FLAGS.sampling_start_value\n\n for itr in range(1, FLAGS.max_iterations + 1):\n if train_input_handle.no_batch_left():\n train_input_handle.begin(do_shuffle=True)\n ims = train_input_handle.get_batch()\n if FLAGS.dataset_name == 'penn':\n ims = ims['frame']\n ims = preprocess.reshape_patch(ims, FLAGS.patch_size)\n\n eta, real_input_flag = schedule_sampling(eta, itr)\n\n trainer.train(model, ims, real_input_flag, FLAGS, itr)\n\n if itr % FLAGS.snapshot_interval == 0:\n model.save(itr)\n\n if itr % FLAGS.test_interval == 0:\n trainer.test(model, test_input_handle, FLAGS, itr)\n\n train_input_handle.next()", "def train(self):\n\t\traise NotImplementedError", "def train(self):\r\n \r\n args = self.args\r\n model = self.model\r\n dataset = self.dataset\r\n train_state = self.train_state\r\n optimizer = self.optimizer\r\n scheduler = self.scheduler\r\n train_bar = self.train_bar\r\n val_bar = self.val_bar\r\n epoch_bar = self.epoch_bar\r\n \r\n for epoch_index in range(args.num_epochs):\r\n train_state['epoch_index'] = epoch_index\r\n \r\n # Iterate over training dataset\r\n \r\n running_loss,running_acc = self.train_loop(epoch_index, args, \r\n 
model, dataset, \r\n optimizer, train_bar)\r\n \r\n train_state['train_loss'].append(running_loss)\r\n train_state['train_acc'].append(running_acc)\r\n \r\n running_loss,running_acc = self.val_loop(epoch_index, args, model, \r\n dataset, optimizer, val_bar)\r\n \r\n \r\n train_state['val_loss'].append(running_loss)\r\n train_state['val_acc'].append(running_acc)\r\n \r\n print(\"Epoch \"+str(epoch_index+1)+\": Running loss=\"+ \\\r\n str(running_loss)+\", Running Acc=\"+str(running_acc))\r\n \r\n train_state = update_train_state(args=args, model=model, \r\n train_state=train_state)\r\n \r\n scheduler.step(train_state['val_loss'][-1])\r\n \r\n if train_state['stop_early']:\r\n break\r\n \r\n train_bar.n = 0\r\n val_bar.n = 0\r\n epoch_bar.set_postfix(best_val=train_state['early_stopping_best_val'] )\r\n epoch_bar.update()\r\n \r\n state_dict = torch.load(train_state['model_filename'])\r\n model.load_state_dict(state_dict)\r\n return model", "def _train(args, pretrain_args):\n start_time = time.time()\n print('Training', ', '.join(args.speakers), '...')\n\n # randomly sample validation set monte_carlo_cv_num times\n for num in range(args.monte_carlo_cv_num):\n # get seed used to sub-sample validation dataset (use 42 for 1st run)\n seed = utils.get_seed(num)\n\n # get train/valid/test data and convert to sequences\n train_data, valid_data, test_data, id_to_word = data_reader.get_data(\n args, seed=seed)\n # set configurations/hyperparameters for model\n config, test_config = utils.set_config(args, id_to_word)\n\n # initialize word embeddings\n init_embed = utils.init_embedding(id_to_word, dim=args.embed_size,\n init_scale=args.init_scale,\n embed_path=args.embed_path)\n\n with tf.Graph().as_default():\n # initializer used to initialize TensorFlow variables\n initializer = tf.random_uniform_initializer(-config['init_scale'],\n config['init_scale'])\n # create Train model\n with tf.name_scope('Train'):\n with tf.variable_scope('Model', reuse=None,\n initializer=initializer):\n m_train = model.Model(args, is_training=True, config=config,\n init_embed=init_embed, name='Train')\n m_train.build_graph()\n\n # create Valid model\n with tf.name_scope('Valid'):\n with tf.variable_scope('Model', reuse=True,\n initializer=initializer):\n m_valid = model.Model(args, is_training=False, config=config,\n init_embed=init_embed, name='Valid')\n m_valid.build_graph()\n\n # create Test model\n with tf.name_scope('Test'):\n with tf.variable_scope('Model', reuse=True,\n initializer=initializer):\n m_test = model.Model(args, is_training=False, config=test_config,\n init_embed=init_embed, name='Test')\n m_test.build_graph()\n\n # create summaries to be viewed in TensorBoard\n tb_summaries = utils.TensorBoardSummaries()\n tb_summaries.create_ops()\n\n init = tf.global_variables_initializer()\n\n # if pretrained, must create dict to initialize TF Saver\n if bool(pretrain_args):\n # get trainable variables and convert to dict for Saver\n reuse_vars = tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES)\n reuse_vars_dict = dict(\n [(var.op.name, var) for var in reuse_vars])\n # create saver for TF session (see function for addl details)\n saver = utils.create_tf_saver(args, pretrain_args,\n reuse_vars_dict)\n else:\n saver = tf.train.Saver()\n\n # ppls dict has perplexities that are stored in results database\n ppls = {}\n ppls, _ = _update_ppls(ppls, initialize=True)\n\n with tf.Session() as sess:\n sess.run(init)\n\n if args.load_path != '':\n print('Restoring model...')\n saver.restore(sess, args.load_path)\n\n for 
epoch in range(config['max_epoch']):\n print('Epoch: {0} Learning rate: {1:.3f}\\n'.format(\n epoch + 1, sess.run(m_train.lr)))\n for i, speaker in enumerate(args.speakers):\n print('Training {0} ...'.format(speaker))\n\n # run epoch on training data\n train_perplexity = _run_epoch(sess, m_train, args, train_data,\n i, tb_summaries, id_to_word,\n train_op=m_train.train_op,\n verbose=True)\n print('Epoch: {0} Train Perplexity: {1:.3f}'.format(\n epoch + 1, train_perplexity))\n ppls, _ = _update_ppls(ppls, epoch=epoch+1,\n speaker=speaker,\n ppl=train_perplexity,\n dataset='train')\n\n print('Validating...')\n # run epoch on validation data\n valid_perplexity = _run_epoch(sess, m_valid, args,\n valid_data, i, tb_summaries,\n id_to_word, verbose=True)\n print('Epoch: {0} Valid Perplexity: {1:.3f}'.format(\n epoch + 1, valid_perplexity))\n ppls, improved = _update_ppls(ppls, epoch=epoch+1,\n speaker=speaker,\n ppl=valid_perplexity,\n dataset='valid')\n\n if improved:\n # save model if valid ppl is lower than current\n # best valid ppl\n if args.save_path != '':\n print('Saving model to {0}.'.format(\n args.save_path))\n saver.save(sess, args.save_path)\n\n for i, speaker in enumerate(args.speakers):\n print('Testing {0} ...'.format(speaker))\n print('Restoring best model for testing...')\n saver.restore(sess, args.save_path)\n # run model on test data\n test_perplexity = _run_epoch(sess, m_test, args, test_data, i)\n ppls['test_ppl_' + speaker] = test_perplexity\n print('Test Perplexity: {0:.3f}'.format(test_perplexity))\n\n if args.insert_db == 'True':\n # write params/config/results to sql database\n results_db.insert_results(args, config, start_time, ppls)", "def train(train_x_df, train_y_df):\n x = list(train_x_df.columns.values)\n model = build_model(len(x))\n\n os.makedirs(\"./saved_models\", exist_ok=True)\n\n cp_callback = keras.callbacks.ModelCheckpoint(\n checkpoint_path, save_weights_only=True, save_best_only=True, verbose=1\n )\n\n # first 80 percent for training\n train_x = train_x_df[1:246005]\n train_y = train_y_df[1:246005]\n\n # other 20 percent for evaluating\n eval_x = train_x_df[246006 : len(train_x_df) - 1]\n eval_y = train_y_df[246006 : len(train_y_df) - 1]\n\n # train model\n model.fit(\n train_x,\n train_y,\n epochs=epochs,\n validation_split=0.2,\n verbose=0,\n batch_size=batch_size,\n callbacks=[cp_callback],\n )\n\n print(\"done training\")\n\n # export the tensorflow model to a onnx model file\n # for loading in tfe and secure enclave\n export_to_onnx(\n x,\n model,\n \"./house_credit_default.onnx\",\n )\n\n # evaluate the model using AUC, the metric used in the kaggle competition\n loss = model.evaluate(eval_x, eval_y, batch_size=batch_size)\n\n predictions = model.predict(eval_x, batch_size=batch_size)\n auc = tf.metrics.auc(eval_y, predictions)\n\n print(\"Evaluation Loss:\", loss[0])\n print(\"Accuracy:\", loss[1])\n print(\"AUC: \", auc[0][1])", "def train(model, infer_train, infer_val, load_checkpoint=None):\n\n global checkpoint_name\n print('Initialising {}'.format(cfg['experiment_name']))\n checkpoint_folder = 'checkpoints/{}/'.format(cfg['experiment_name'])\n\n if not os.path.exists(checkpoint_folder):\n os.makedirs(checkpoint_folder)\n\n tb_folder = 'tb/{}/'.format(cfg['experiment_name'])\n if not os.path.exists(tb_folder):\n os.makedirs(tb_folder)\n\n writer = SummaryWriter(logdir=tb_folder, flush_secs=30)\n optimiser = Adam(model.parameters(), lr=cfg['learning_rate'], weight_decay=cfg['weight_decay'])\n\n train_dataset = 
TweetDataset(dataset_type='train')\n train_loader = DataLoader(train_dataset, batch_size=cfg['batch_size'], num_workers=cfg['workers'],\n collate_fn=collate_function, shuffle=True, pin_memory=True)\n\n val_dataset = TweetDataset(dataset_type='val')\n val_loader = DataLoader(val_dataset, batch_size=cfg['batch_size'], num_workers=cfg['workers'],\n collate_fn=collate_function, shuffle=False, pin_memory=True)\n\n if load_checkpoint:\n checkpoint = torch.load(load_checkpoint)\n assert model.config == checkpoint['net_config'], \\\n \"The provided checkpoint has a different configuration, loading is impossible\"\n start_epoch = checkpoint['epoch'] + 1\n epochs = cfg['epochs'] + start_epoch\n step = checkpoint['step']\n model.load_state_dict(checkpoint['model'])\n optimiser.load_state_dict(checkpoint['optimiser'])\n print(\"Loaded the checkpoint at {}\".format(load_checkpoint))\n else:\n start_epoch, step = 0, 0\n epochs = cfg['epochs']\n\n init_loss = 0.\n avg_loss = AverageMeter()\n best_mae = 1e10\n\n print('Sanity val')\n val(model, val_loader, writer, 0, infer_val)\n model.train()\n\n print('Starting training')\n for epoch in range(start_epoch, epochs):\n loader_length = len(train_loader)\n epoch_start = time.time()\n\n for batch_idx, batch in enumerate(train_loader):\n optimiser.zero_grad()\n\n loss = infer_train(model, batch)\n loss.backward()\n\n if epoch == 0 and batch_idx == 0:\n init_loss = loss\n\n # logging\n elapsed = time.time() - epoch_start\n progress = batch_idx / loader_length\n est = datetime.timedelta(seconds=int(elapsed / progress)) if progress > 0.001 else '-'\n avg_loss.update(loss)\n suffix = '\\tloss {:.4f}/{:.4f}\\tETA [{}/{}]'.format(avg_loss.avg, init_loss,\n datetime.timedelta(seconds=int(elapsed)), est)\n printProgressBar(batch_idx, loader_length, suffix=suffix,\n prefix='Epoch [{}/{}]\\tStep [{}/{}]'.format(epoch, epochs - 1, batch_idx, loader_length))\n\n writer.add_scalar('Steps/train_loss', loss, step)\n\n # saving the model\n if step % cfg['checkpoint_every'] == 0:\n checkpoint_name = '{}/epoch_{}.pth'.format(checkpoint_folder, epoch)\n torch.save({'model': model.state_dict(), 'epoch': epoch, 'batch_idx': batch_idx, 'step': step,\n 'optimiser': optimiser.state_dict(), 'train_config': cfg, 'net_config': model.config,\n 'dataset_config': DATASET_CONFIG},\n checkpoint_name)\n step += 1\n optimiser.step()\n\n # validating\n if step % cfg['val_every'] == 0:\n mae = val(model, val_loader, writer, step, infer_val)\n if mae < best_mae:\n best_mae = mae\n print('Best model with V{:.2f}'.format(best_mae))\n torch.save({'model': model.state_dict(), 'epoch': epoch, 'batch_idx': batch_idx, 'step': step,\n 'optimiser': optimiser.state_dict(), 'train_config': cfg, 'net_config': model.config,\n 'dataset_config': DATASET_CONFIG},\n '{}/best.pth'.format(checkpoint_folder))\n model.train()\n\n # end of epoch\n print('')\n writer.add_scalar('Epochs/train_loss', avg_loss.avg, epoch)\n avg_loss.reset()\n checkpoint_name = '{}/epoch_{}.pth'.format(checkpoint_folder, epoch)\n torch.save({'model': model.state_dict(), 'epoch': epoch, 'batch_idx': loader_length, 'step': step,\n 'optimiser': optimiser.state_dict(), 'train_config': cfg, 'net_config': model.config,\n 'dataset_config': DATASET_CONFIG},\n checkpoint_name)\n\n # finished training\n writer.close()\n print('Training finished :)')", "def train_and_eval(params: flags.FlagValues) -> tf.keras.callbacks.History:\n logging.info('Run training for {} with {}'.format(params.model_name,\n params.dataset_name))\n logging.info('The CLI 
params are: {}'.format(params.flag_values_dict()))\n d_config = _get_dataset_config().get(params.dataset_name)()\n m_config = _get_model_config().get(params.model_name)()\n\n logging.info('Training dataset configuration:', d_config)\n logging.info('Training model configuration:', m_config)\n\n # override the model params with CLI params\n m_config.num_classes = d_config.num_classes\n m_config.dropout_keep_prob = 1 - params.dropout_rate\n m_config.weight_decay = params.std_weight_decay\n m_config.stddev = params.truncated_normal_stddev\n m_config.batch_norm_decay = params.batch_norm_decay\n\n strategy = tf.distribute.MirroredStrategy()\n with strategy.scope():\n # override the dataset params with CLI params\n if params.data_dir:\n d_config.data_dir = params.data_dir\n global_batch_size = params.batch_size * strategy.num_replicas_in_sync\n\n # override the dataset params with CLI params\n # for distributed training, update batch size\n d_config.batch_size = global_batch_size\n # determine whether one_hot is used based on label_smoothing\n d_config.one_hot = params.label_smoothing and params.label_smoothing > 0\n\n # build train dataset\n train_dataset = get_dataset(d_config)\n # build validation dataset\n d_config.split = 'validation'\n eval_dataset = get_dataset(d_config)\n\n # compute number iterations per epoch\n steps_per_epoch = d_config.num_examples // d_config.batch_size\n eval_steps = d_config.num_eval_examples // d_config.batch_size\n\n # build the model\n keras_model = build_model(\n model_name=params.model_name,\n dataset_config=d_config,\n model_config=m_config\n )\n\n # build the optimizer\n learning_params = defaults.LR_CONFIG_DEFAULT\n learning_params.update({'initial_lr': params.lr,\n 'decay_epochs': params.lr_decay_epochs,\n 'decay_rate': params.lr_decay_rate})\n optimizer_params = defaults.OP_CONFIG_DEFAULT\n optimizer_params.update({'decay': params.op_decay_rate,\n 'momentum': params.op_momentum})\n optimizer = _get_optimizer(\n batch_size=global_batch_size,\n steps_per_epoch=steps_per_epoch,\n lr_name=params.learning_scheduler_name,\n optimizer_name=params.optimizer_name,\n lr_params=learning_params,\n optimizer_params=optimizer_params\n )\n\n logging.info('Exponential decay rate:{}'.format(params.ma_decay_rate))\n if params.ma_decay_rate:\n optimizer = tfa.optimizers.MovingAverage(\n optimizer=optimizer,\n average_decay=params.ma_decay_rate)\n\n # compile model\n if d_config.one_hot:\n loss_obj = tf.keras.losses.CategoricalCrossentropy(\n label_smoothing=params.label_smoothing)\n else:\n loss_obj = tf.keras.losses.SparseCategoricalCrossentropy()\n\n keras_model.compile(\n optimizer=optimizer,\n loss=loss_obj,\n metrics=[_get_metrics(one_hot=d_config.one_hot)['acc']],\n )\n\n logging.info(keras_model.summary())\n\n initial_epoch = 0\n if params.resume_checkpoint:\n initial_epoch = _resume_from_checkpoint(model=keras_model,\n model_dir=params.model_dir,\n train_steps=steps_per_epoch)\n\n # Callbacks\n callbacks_to_use = _get_callback(model_dir=params.model_dir)\n\n # Train model\n history = keras_model.fit(\n train_dataset,\n steps_per_epoch=steps_per_epoch,\n epochs=params.epochs,\n validation_data=eval_dataset,\n validation_steps=eval_steps,\n initial_epoch=initial_epoch,\n verbose=1,\n callbacks=callbacks_to_use\n )\n\n return history", "def train_model(model, loss_fn, optimizer, train_generator, dev_generator, EXP):\n prev_loss = np.Infinity\n prev_acc = 0\n trained_model = model # to hold best model\n epochs = 150\n every = 1\n train_loss_track = []\n dev_loss_track = 
[]\n train_acc_track = []\n dev_acc_track = []\n\n train_num_batch = 480/30\n num_batch = 60/30\n for epoch in range(epochs):\n\n training_loss, training_accuracy = 0.0, 0.0\n train_gold = []\n train_pred = []\n # Set network into train set\n model.train()\n hidden = None\n for batch_x, batch_y in train_generator:\n # reset optimizer\n optimizer.zero_grad()\n # Predict outputs\n batch_x = batch_x.permute(1, 0, 2)\n outputs = model(batch_x)\n\n # Calculate the loss\n train_gold.extend(batch_y.cpu().detach().numpy())\n train_pred.extend(outputs.argmax(1).cpu().detach().numpy())\n loss = loss_fn(outputs, batch_y)\n # Backward and update step\n loss.backward()\n optimizer.step()\n\n training_loss += loss.detach().item()\n training_loss = training_loss/train_num_batch\n\n train_accuracy = accuracy_score(train_gold, train_pred)\n print('Epoch: ' + str(epoch) + ', Total train Loss: ' + str(training_loss)\n + ', Total train accu: ' + str(round(train_accuracy * 100, 2)) + \"%\")\n train_loss_track.append(training_loss)\n train_acc_track.append(train_accuracy)\n\n if epoch % every == 0:\n # Set network into development set\n val_gold = []\n val_pred = []\n dev_loss, dev_accuracy = 0.0, 0.0\n with torch.no_grad(): # set not gradient\n model.eval()\n # optimizer.zero_grad()\n\n for batch_x, batch_y in dev_generator:\n batch_x = batch_x.permute(1, 0, 2)\n outputs = model(batch_x)\n\n # Add predictions and gold labels\n val_gold.extend(batch_y.cpu().detach().numpy())\n val_pred.extend(outputs.argmax(1).cpu().detach().numpy())\n\n dev_loss += loss_fn(outputs.double(), batch_y.long()).detach().item()\n\n dev_accuracy = accuracy_score(val_gold, val_pred)\n f1 = f1_score(val_gold, val_pred, average='macro')\n dev_loss = dev_loss/num_batch\n print('Dev Epoch: ' + str(epoch) + ', Total dev Loss: ' + str(dev_loss)\n + ', Total dev accu: ' + str(round(dev_accuracy*100, 3)) + \"%\")\n\n if dev_accuracy > prev_acc:\n print(f\"saving model... 
loss: {dev_loss}\")\n # prev_loss = dev_loss\n prev_acc = dev_accuracy\n trained_model = model\n torch.save(trained_model, f\"./models/best_model_{EXP}.pth\")\n dev_loss_track.append(dev_loss)\n dev_acc_track.append(dev_accuracy)\n tracks = pd.DataFrame()\n tracks['train_loss'] = train_loss_track\n tracks['train_acc'] = train_acc_track\n tracks['dev_loss'] = dev_loss_track\n tracks['dev_acc'] = dev_acc_track\n\n print(tracks)\n tracks.to_csv(f\"history_{EXP}.csv\")\n pickle.dump(tracks, open(f\"history_{EXP}.pkl\", 'wb'), protocol=4)\n\n return trained_model", "def train_one_epoch(self):\n prog_bar = tqdm(enumerate(self.train_data), total=len(self.train_data))\n self.model.train()\n with autocast():\n for idx, inputs in prog_bar:\n ids = inputs['inputs'].to(self.device, dtype=torch.long)\n mask = inputs['attention_mask'].to(self.device, dtype=torch.long)\n targets = inputs['targets'].to(self.device, dtype=torch.float)\n\n outputs = self.model(input_ids=ids, attention_mask=mask) \n\n loss = self.loss_fn(outputs.squeeze(1), targets)\n prog_bar.set_description('loss: {:.2f}'.format(loss.item()))\n\n Config.scaler.scale(loss).backward()\n Config.scaler.step(self.optimizer)\n Config.scaler.update()\n self.optimizer.zero_grad()\n self.scheduler.step()", "def train(self, dataset=None, epochs=2, verbose=1, workers=1):\n dataset = utils.prepare_dataset(dataset,\n self.config.batch_size,\n self.config.inputs,\n self.dtype,\n self.config.batch_decay)\n callbacks = [ModelCheckpoint(os.path.join(self.config.model_folder,\n '{epoch:03d}.hdf5'),\n monitor='val_loss',\n verbose=1,\n save_best_only=False,\n save_weights_only=False,\n mode='auto'),\n GeneratorCallback(self.config.test_string,\n self.config.inputs,\n self.config.generated_characters,\n self.dtype)\n ]\n for i in range(epochs):\n self.model.fit(dataset,\n initial_epoch=i,\n epochs=i + 1,\n verbose=verbose,\n use_multiprocessing=True,\n workers=workers,\n callbacks=callbacks)", "def train_model(model,\n dataset_info,\n steps_per_epoch,\n args):\n if args.mode not in ['train', 'finetune']:\n raise ValueError(\"train_model() called when in %s mode\" % args.mode)\n\n dataset_info, model_info = fill_info_dicts(dataset_info, args)\n\n train_batches = {name: model_info[name]['train_batch']\n for name in model_info}\n\n additional_encoder_kwargs = dict()\n\n for dataset_name in model_info:\n additional_encoder_kwargs[dataset_name] = dict()\n\n with open(args.encoder_config_file, 'r') as f:\n encoders = json.load(f)\n extract_fn = encoders[args.architecture][dataset_name]['extract_fn']\n embed_fn = encoders[args.architecture][dataset_name]['embed_fn']\n\n if embed_fn in ['embed_sequence', 'pretrained']:\n # TODO text_field_name ?\n if 'input_key' == 'weights':\n additional_encoder_kwargs[dataset_name]['weights'] = \\\n train_batches[\n dataset_name]['text_weights']\n elif embed_fn == 'pretrained':\n additional_encoder_kwargs[dataset_name]['is_training'] = True\n elif embed_fn == 'tokenized_embed':\n additional_encoder_kwargs[dataset_name][\n 'precompute_path'] = args.precompute_path\n else:\n pass\n\n if extract_fn == \"serial_lbirnn\":\n additional_encoder_kwargs[dataset_name]['is_training'] = True\n if args.experiment_name == \"RUDER_NAACL_18\":\n # use last token of last sequence as feature representation\n indices = train_batches[dataset_name][\n 'seq2_length']\n ones = tf.ones([tf.shape(indices)[0]], dtype=tf.int64)\n # last token is at pos. 
length-1\n indices = tf.subtract(indices, ones)\n additional_encoder_kwargs[dataset_name]['indices'] = indices\n elif extract_fn == \"lbirnn\":\n additional_encoder_kwargs[dataset_name]['is_training'] = True\n elif extract_fn == \"serial_lbirnn_stock\":\n additional_encoder_kwargs[dataset_name]['is_training'] = True\n elif extract_fn == \"dan\":\n additional_encoder_kwargs[dataset_name]['is_training'] = True\n else:\n pass\n\n # losses = model.get_multi_task_loss(train_batches,\n # is_training=True,\n # additional_encoder_kwargs=additional_encoder_kwargs)\n\n # TODO multi loss\n losses = dict()\n for dataset in args.datasets:\n # import pdb\n # sess = tf.Session()\n # sess.run(model_info[dataset_name]['train_iter'].initializer)\n # batch = model_info[dataset_name]['train_batch']\n losses[dataset] = model.get_loss(train_batches[dataset],\n dataset,\n dataset,\n additional_encoder_kwargs=additional_encoder_kwargs,\n # sequence in train mode\n is_training=True)\n\n # import pdb\n # sess.run(model_info[dataset_name]['pred_iter'].initializer)\n # batch = model_info[dataset_name]['pred_batch']\n # text, weights = sess.run([batch['text'], batch['text_weights']])\n # pdb.set_trace()\n\n # # see if dropout and is_training working\n # # by checking train loss with different is_training the same\n\n # Done building compute graph; set up training ops.\n\n # Training ops\n global_step_tensor = tf.train.get_or_create_global_step()\n\n train_ops = dict()\n optim = tf.train.RMSPropOptimizer(learning_rate=args.lr0)\n for dataset_name in model_info:\n # tvars, grads = get_var_grads(losses[dataset_name])\n # train_ops[dataset_name] = get_train_op(tvars, grads, lr, args.max_grad_norm,\n # global_step_tensor, args.optimizer, name='train_op_{}'.format(dataset_name))\n train_ops[dataset_name] = optim.minimize(losses[dataset_name],\n global_step=global_step_tensor)\n\n # tvars, grads = get_var_grads(loss)\n # train_op = get_train_op(tvars, grads, lr, args.max_grad_norm,\n # global_step_tensor, args.optimizer, name='train_op')\n init_ops = [tf.global_variables_initializer(),\n tf.local_variables_initializer()]\n config = get_proto_config(args)\n\n # Get training objective. The inputs are:\n # 1. 
A dict of { dataset_key: dataset_iterator }\n #\n\n fill_eval_loss_op(args, model, dataset_info, model_info)\n fill_pred_op_info(dataset_info, model, args, model_info)\n fill_topic_op(args, model_info)\n\n print(\"All the variables after defining valid/test accuracy:\")\n all_variables = tf.global_variables()\n trainable_variables = tf.trainable_variables()\n total_trainable_parameters = 0\n for var in all_variables:\n if var in trainable_variables:\n print('(t) {}'.format(var))\n shape = var.get_shape()\n var_params = 1\n for dim in shape:\n var_params *= dim.value\n total_trainable_parameters += var_params\n else:\n print('( ) {}'.format(var))\n\n print(\"Total trainable parameters in this model={}\\n\\n\\n\".format(\n total_trainable_parameters))\n\n # # Add ops to save and restore all the variables.\n\n # latest checkpoint\n # saves every several steps\n # automatically done by tf.train.SingularMonitorSession with\n # tf.train.CheckpoinSaverHook\n\n # TODO load from some checkpoint dif at the beginning(?)\n saver_hook = tf.train.CheckpointSaverHook(\n checkpoint_dir=os.path.join(args.checkpoint_dir, 'latest'),\n save_steps=100)\n\n # saved model builders for each model\n # builders = init_builders(args, model_info)\n\n saver = tf.train.Saver(max_to_keep=100)\n\n with tf.train.SingularMonitoredSession(hooks=[saver_hook],\n config=config) as sess:\n\n if args.mode == 'train':\n sess.run(init_ops)\n\n else:\n assert len(args.datasets) == 1\n checkpoint_path_load = model_info[args.datasets[0]][\n 'checkpoint_path_load']\n saver.restore(sess, checkpoint_path_load)\n\n if args.summaries_dir:\n train_file_writer = tf.summary.FileWriter(\n os.path.join(args.summaries_dir, 'train'), graph=sess.graph)\n valid_file_writer = tf.summary.FileWriter(\n os.path.join(args.summaries_dir, 'valid'), graph=sess.graph)\n\n best_eval_performance = dict()\n for dataset_name in model_info:\n _train_init_op = model_info[dataset_name]['train_init_op']\n _valid_init_op = model_info[dataset_name]['valid_init_op']\n\n sess.run([_train_init_op, _valid_init_op])\n\n init_value = float('-inf')\n if args.tuning_metric == 'MAE_MACRO':\n init_value = float('inf')\n best_eval_performance[dataset_name] = {\"epoch\": -1,\n args.tuning_metric: init_value,\n \"performance\": None\n }\n\n best_total_tuning_metric = init_value\n best_tuning_metric_epoch = -1\n\n main_task_dev_tuning_metric = []\n stopping_criterion_reached = False\n early_stopping_dev_results = \"\"\n\n # Do training\n make_dir(os.path.dirname(args.log_file))\n with open(args.log_file, 'a') as f:\n f.write('VALIDATION RESULTS\\n')\n for epoch in xrange(1, args.num_train_epochs + 1):\n\n start_time = time()\n\n total_tuning_metric = 0.0\n\n # Take steps_per_epoch gradient steps\n total_loss = 0\n num_iter = 0\n # for _ in tqdm(xrange(steps_per_epoch)):\n # step, loss_v, _ = sess.run(\n # [global_step_tensor, loss, train_op])\n # num_iter += 1\n # total_loss += loss_v\n #\n # # loss_v is sum over a batch from each dataset of the average loss *per\n # # training example*\n # assert num_iter > 0\n #\n # average loss per batch (which is in turn averaged across examples)\n # train_loss = float(total_loss) / float(num_iter)\n\n for _ in tqdm(xrange(steps_per_epoch)):\n for (dataset_name, alpha) in zip(*[args.datasets, args.alphas]):\n loss_v, _ = sess.run(\n [losses[dataset_name], train_ops[dataset_name]])\n total_loss += alpha * loss_v\n step = sess.run(global_step_tensor)\n num_iter += 1\n assert num_iter > 0\n\n train_loss = float(total_loss) / float(num_iter)\n\n 
if args.summaries_dir:\n train_loss_summary = tf.Summary(\n value=[\n tf.Summary.Value(tag=\"loss\", simple_value=train_loss)])\n train_file_writer.add_summary(\n train_loss_summary, global_step=step)\n\n # Evaluate held-out tuning metric\n # if not args.test: # Validation mode\n # Get performance metrics on each dataset\n for dataset_name in args.datasets:\n _pred_op = model_info[dataset_name]['valid_pred_op']\n _eval_labels = model_info[dataset_name]['valid_batch'][\n args.label_key]\n _eval_iter = model_info[dataset_name]['valid_iter']\n _get_topic_op = model_info[dataset_name]['valid_topic_op']\n _loss_op = model_info[dataset_name]['valid_loss_op']\n _metrics = compute_held_out_performance(sess,\n _pred_op,\n _eval_labels,\n _eval_iter,\n metrics=dataset_info[\n dataset_name][\n 'metrics'],\n labels=dataset_info[\n dataset_name][\n 'labels'],\n args=args,\n get_topic_op=_get_topic_op,\n topic_path=dataset_info[\n dataset_name][\n 'topic_path'],\n eval_loss_op=_loss_op)\n model_info[dataset_name]['valid_metrics'] = _metrics\n\n end_time = time()\n elapsed = end_time - start_time\n\n # Manually compute the validation loss since each dataset is iterated through once\n # in a serial manner and not \"in parallel\" (i.e., a batch from each)\n valid_loss = 0.0\n for (dataset_name, alpha) in zip(*[args.datasets, args.alphas]):\n valid_loss += float(alpha) * \\\n model_info[dataset_name]['valid_metrics'][\n 'eval_loss']\n\n main_task_tuning_metric = model_info[args.datasets[0]\n ]['valid_metrics'][args.tuning_metric]\n\n if args.summaries_dir:\n valid_loss_summary = tf.Summary(\n value=[\n tf.Summary.Value(tag=\"loss\", simple_value=valid_loss)])\n valid_file_writer.add_summary(\n valid_loss_summary, global_step=step)\n valid_main_task_tuning_metric_summary = tf.Summary(value=[\n tf.Summary.Value(tag=\"main-task-\" + args.tuning_metric,\n simple_value=main_task_tuning_metric)])\n valid_file_writer.add_summary(\n valid_main_task_tuning_metric_summary,\n global_step=step)\n\n if (\n main_task_tuning_metric >= args.early_stopping_acc_threshold) and (\n len(main_task_dev_tuning_metric) >= args.patience) and (\n main_task_tuning_metric < main_task_dev_tuning_metric[\n -args.patience]):\n print(\n \"Stopping early at epoch {} (patience={}, early stopping acc threshold={})\".format(\n epoch, args.patience,\n args.early_stopping_acc_threshold))\n stopping_criterion_reached = True\n\n main_task_dev_tuning_metric.append(main_task_tuning_metric)\n\n if args.reporting_metric != \"Acc\":\n main_task_performance = \\\n model_info[args.datasets[0]]['valid_metrics'][\n args.reporting_metric]\n if args.summaries_dir:\n valid_main_task_performance_summary = tf.Summary(value=[\n tf.Summary.Value(\n tag=\"main-task-{}\".format(args.reporting_metric),\n simple_value=main_task_performance)])\n valid_file_writer.add_summary(\n valid_main_task_performance_summary,\n global_step=step)\n\n # Log performance(s)\n str_ = '[epoch=%d/%d step=%d (%d s)] train_loss=%s valid_loss=%s (per batch)' % (\n epoch, args.num_train_epochs, np.asscalar(step), elapsed,\n train_loss, valid_loss)\n\n for dataset_name in args.datasets:\n _num_eval_total = model_info[dataset_name]['valid_metrics'][\n 'ntotal']\n # TODO use other metric here for tuning\n _eval_tuning_metric = model_info[dataset_name]['valid_metrics'][\n args.tuning_metric]\n # _eval_align_acc = model_info[dataset_name]['valid_metrics'][\n # 'aligned_accuracy']\n\n str_ += '\\n(%s) ' % dataset_name\n for m, s in model_info[dataset_name]['valid_metrics'].items():\n if 
(dataset_name == args.datasets[0]) and (\n m == args.reporting_metric): # main task\n str_ += '**%s=%f** ' % (m, s)\n elif m == args.tuning_metric:\n str_ += '*%s=%f* ' % (m, s)\n elif m == 'Confusion_Matrix':\n pass\n else:\n str_ += '%s=%f ' % (m, s)\n if 'Confusion_Matrix' in model_info[dataset_name][\n 'valid_metrics']:\n str_ += 'Confusion_Matrix:\\n'\n str_ += '\\n'.join(' '.join('%4d' % x for x in y) for y in\n model_info[dataset_name]['valid_metrics'][\n 'Confusion_Matrix'])\n\n # Track best-performing epoch for each dataset\n # use the newest best epoch for test\n if _eval_tuning_metric >= best_eval_performance[dataset_name][\n args.tuning_metric]:\n best_eval_performance[dataset_name][args.tuning_metric] = \\\n _eval_tuning_metric\n best_eval_performance[dataset_name][\"performance\"] = \\\n model_info[dataset_name]['valid_metrics'].copy()\n best_eval_performance[dataset_name][\"epoch\"] = epoch\n # save best model\n saver.save(sess.raw_session(),\n model_info[dataset_name]['checkpoint_path'])\n\n # # test\n # saver.save(sess.raw_session(), checkpoint_path)\n\n total_tuning_metric += _eval_tuning_metric\n\n # Track best-performing epoch for collection of datasets\n\n if total_tuning_metric >= best_total_tuning_metric:\n best_total_tuning_metric = total_tuning_metric\n best_tuning_metric_epoch = epoch\n best_epoch_results = str_\n if len(args.datasets) > 1:\n saver.save(sess.raw_session(),\n os.path.join(args.checkpoint_dir, 'MULT',\n 'model'))\n\n logging.info(str_)\n\n # Log dev results in a file\n with open(args.log_file, 'a') as f:\n f.write(str_ + '\\n')\n\n if stopping_criterion_reached:\n saver.save(sess.raw_session(),\n os.path.join(args.checkpoint_dir, 'early-stopping',\n 'model'))\n\n early_stopping_dev_results = str_\n # with open(args.log_file, 'a') as f:\n # f.write('\\nSTOPPED EARLY AFTER {} EPOCHS\\n'.format(epoch))\n # f.write(str_ + '\\n')\n break\n\n print(best_eval_performance)\n print(\n 'Best total {}: {} at epoch {}'.format(\n args.tuning_metric,\n best_total_tuning_metric,\n best_tuning_metric_epoch))\n print(best_epoch_results)\n\n with open(args.log_file, 'a') as f:\n # f.write(best_eval_acc + '\\n')\n # f.write('Best total accuracy: {} at epoch {}'.format(best_total_acc,\n # best_total_acc_epoch))\n f.write('\\nBest single-epoch performance across all datasets\\n')\n f.write(best_epoch_results + '\\n\\n')\n\n # Write (add) the result to a common report file\n with open(args.log_file, 'a') as f:\n for dataset in best_eval_performance.keys():\n f.write(str(dataset))\n f.write(\" \")\n f.write(\"\\n\")\n for dataset, values in best_eval_performance.items():\n f.write(\n 'Metrics on highest-{} epoch for dataset {}: {}\\n'.format(\n args.tuning_metric, dataset, values))\n\n f.write('Best total {}: {} at epoch {}\\n\\n'.format(\n args.tuning_metric, best_total_tuning_metric,\n best_tuning_metric_epoch))\n if stopping_criterion_reached:\n f.write('STOPPED EARLY AFTER {} EPOCHS\\n'.format(epoch))\n f.write(early_stopping_dev_results + '\\n\\n')\n\n if args.summaries_dir:\n train_file_writer.close()\n valid_file_writer.close()", "def start_training(model, train_generator, test_generator, image_files, filename, job, epochs):\n \n from keras.models import load_model\n from keras.callbacks import ModelCheckpoint\n\n _logger.debug(\"Start Training\")\n if epochs == 0:\n _logger.error(\"cannot train for 0 epochs\")\n return False, False, False, False, False\n training_complete = 0\n path = os.path.join(config.TRAINED_MODELS_DATA, filename)\n if not 
os.path.exists(path):\n os.makedirs(path, exist_ok=True)\n callbacks = [ModelCheckpoint(\n filepath = os.path.join(path, '{}tmp.h5'.format(filename)), \n monitor='val_loss', \n verbose=0, \n save_best_only=False, \n save_weights_only=False, \n mode='auto', \n period=int(0.1 * epochs))]\n state = api.update_job_state(job, 'training', 'Start training for {} epochs'.format(epochs))\n history_acc = []\n for i in range(1, epochs+1):\n r = model.fit_generator(\n train_generator,\n validation_data=test_generator,\n epochs=1,\n callbacks = callbacks,\n steps_per_epoch= (0.8 * len(image_files)) / batch_size,\n validation_steps=(0.2 * len(image_files)) / batch_size,\n )\n history_acc.append(r.history['acc'][-1])\n with open(os.path.join(os.path.join(config.TRAINED_MODELS_DATA, filename), 'info.txt'), 'w') as outfile:\n outfile.write(str(i))\n \n if len(history_acc) < epochs:\n training_complete = 0\n return training_complete, r.history['acc'][-1], r.history['loss'][-1], r.history['val_acc'][-1], r.history['val_loss'][-1]\n else:\n model_file = os.path.join(path, ('{}.h5'.format(filename)))\n model.save(model_file)\n training_complete = 1\n return training_complete, r.history['acc'][-1], r.history['loss'][-1], r.history['val_acc'][-1], r.history['val_loss'][-1]\n else:\n _logger.debug(\"Loading existing model file\")\n model_ = load_model(os.path.join(path, '{}tmp.h5'.format(filename)))\n with open(os.path.join(os.path.join(config.TRAINED_MODELS_DATA, filename), 'info.txt'), 'r') as outfile:\n last_epoch = int(outfile.read())\n if not model_ == None:\n callbacks = [ModelCheckpoint(\n filepath = os.path.join(path, '{}tmp.h5'.format(filename)), \n monitor='val_loss', \n verbose=0, \n save_best_only=False, \n save_weights_only=False, \n mode='auto', \n period=int(0.1 * epochs))]\n state = api.update_job_state(job, 'training', 'Start training for {} epochs'.format(epochs))\n history_acc = []\n if last_epoch == epochs:\n for i in range(1, epochs+1):\n r = model_.fit_generator(\n train_generator,\n validation_data=test_generator,\n epochs=1,\n callbacks = callbacks,\n steps_per_epoch= (0.8 * len(image_files)) / batch_size,\n validation_steps=(0.2 * len(image_files)) / batch_size,\n )\n history_acc.append(r.history['acc'][-1])\n with open(os.path.join(os.path.join(config.TRAINED_MODELS_DATA, filename), 'info.txt'), 'w') as outfile:\n outfile.write(str(i))\n else:\n for i in range(1, (epochs+1)-last_epoch):\n r = model_.fit_generator(\n train_generator,\n validation_data=test_generator,\n epochs=1,\n callbacks = callbacks,\n steps_per_epoch= (0.8 * len(image_files)) / batch_size,\n validation_steps=(0.2 * len(image_files)) / batch_size,\n )\n history_acc.append(r.history['acc'][-1])\n with open(os.path.join(os.path.join(config.TRAINED_MODELS_DATA, filename), 'info.txt'), 'w') as outfile:\n outfile.write(str(i+last_epoch))\n else:\n _logger.error(\"model file missing\")\n return False, False, False, False, False\n if len(history_acc) < (epochs - last_epoch):\n training_complete = 0\n return training_complete, r.history['acc'][-1], r.history['loss'][-1], r.history['val_acc'][-1], r.history['val_loss'][-1]\n else:\n model_file = os.path.join(path,('{}.h5'.format(filename)))\n model_.save(model_file)\n training_complete = 1\n return training_complete, r.history['acc'][-1], r.history['loss'][-1], r.history['val_acc'][-1], r.history['val_loss'][-1]", "def train(self, X_train, y_train):\n self.model.fit(X_train, y_train)", "def train_model():\n return model.fit(train_images, train_labels, epochs=10, 
validation_data=(test_images, test_labels), shuffle='True')", "def train(args, model, train_data_loader, dev_data_loader, device):\n\n\tmodel.train()\n\toptimizer = torch.optim.Adam(model.parameters())\n\tprint_loss_total = 0\n\tepoch_loss_total = 0\n\tstart = time.time()\n\n\t#### modify the following code to complete the training funtion\n\n\tbest_train_acc, best_dev_acc = 0.0, 0.0\n\n\tfor idx, batch in enumerate(train_data_loader):\n\t\tquestion_feature_vec = batch['feature_vec'].to(device)\n\t\tquestion_len = batch['len'].to(device)\n\t\tlabels = batch['labels'].to(device)\n\n\t\t#### Your code here ----\n\n\t\t# zero out\n\t\toptimizer.zero_grad()\n\n\t\t# get output from model\n\t\tlogits = model(question_feature_vec, question_len)\n\n\t\t# use loss_fn defined above to calculate loss\n\t\tloss = loss_fn(logits, labels)\n\n\t\t# use accuracy_fn defined above to calculate 'error' and number of examples ('num_examples') used to\n\t\t# calculate accuracy below.\n\t\terror, num_examples = accuracy_fn(logits, labels)\n\n\t\t# backprop\n\t\tloss.backward()\n\t\toptimizer.step()\n\n\t\t###Your code ends ---\n\t\taccuracy = 1 - error / num_examples\n\t\tclip_grad_norm_(model.parameters(), 5)\n\t\tprint_loss_total += loss.data.numpy()\n\t\tepoch_loss_total += loss.data.numpy()\n\n\t\tif (idx + 1) % args.checkpoint == 0 and idx > 0:\n\t\t\tprint_loss_avg = print_loss_total / args.checkpoint\n\n\t\t\tdev_acc = evaluate(dev_data_loader, model, device)\n\n\t\t\tprint('number of steps: %d, train loss: %.5f, train acc: %.3f, dev acc: %.3f, time: %.5f'\n\t\t\t % (idx + 1, print_loss_avg, accuracy, dev_acc, time.time() - start))\n\t\t\tprint_loss_total = 0\n\t\t\tif accuracy > best_train_acc:\n\t\t\t\tbest_train_acc = accuracy\n\t\t\tif dev_acc > best_dev_acc:\n\t\t\t\tbest_dev_acc = dev_acc\n\n\treturn best_train_acc, best_dev_acc", "def train():\n\t# 1、make dataloader\n\ttrain_loader, val_loader, num_query, num_class = make_data_loader(cfg)\n\t#print(\"num_query:{},num_class:{}\".format(num_query,num_class))\n\n\t# 2、make model\n\tmodel = build_model(cfg, num_class)\n\n\t# model.eval()\n\t# x = model(img_tensor)\n\t# print(x.shape)\n\t# 3、 make optimizer\n\toptimizer = make_optimizer(cfg, model)\n\n\t# 4、 make lr_scheduler\n\tscheduler = make_lr_scheduler(cfg, optimizer)\n\n\t# 5、 make loss_func\n\tif cfg.MODEL.PCB_NECK:\n\t\t# make loss specificially for pcb \n\t\tloss_func = get_softmax_triplet_loss_fn(cfg, num_class)\n\telse:\n\t\tloss_func = make_loss(cfg, num_class)\n\n\t# get paramters\n\tlog_period = cfg.OUTPUT.LOG_PERIOD \n\tckpt_period =cfg.OUTPUT.CHECKPOINT_PERIOD\n\teval_period = cfg.OUTPUT.EVAL_PERIOD\n\toutput_dir = cfg.OUTPUT.ROOT_DIR\n\tdevice = cfg.MODEL.DEVICE\n\tepochs = cfg.SOLVER.MAX_EPOCHS\n\tuse_gpu = device == \"cuda\"\n\tuse_neck = cfg.MODEL.NECK or cfg.MODEL.LEARN_REGION \n\t# how many batch for each log\n\tbatch_size = cfg.SOLVER.IMGS_PER_BATCH\n\tbatch_num = len(train_loader) \n\t\n\tlog_iters = batch_num // log_period\n\tpretrained = cfg.MODEL.PRETRAIN_PATH != ''\n\tparallel = cfg.MODEL.PARALLEL \t\n\tgrad_clip = cfg.DARTS.GRAD_CLIP \n\n\tfeat_norm = cfg.TEST.FEAT_NORM \n\tckpt_save_path = cfg.OUTPUT.ROOT_DIR + cfg.OUTPUT.CKPT_DIR\n\tif not os.path.exists(ckpt_save_path):\n\t\tos.makedirs(ckpt_save_path)\n\n\n\t# create *_result.xlsx\n\t# save the result for analyze\n\tname = (cfg.OUTPUT.LOG_NAME).split(\".\")[0] + \".xlsx\"\n\tresult_path = cfg.OUTPUT.ROOT_DIR + name\n\n\twb = xl.Workbook()\n\tsheet = wb.worksheets[0]\n\ttitles = ['size/M','speed/ms','final_planes', 
'acc', 'mAP', 'r1', 'r5', 'r10', 'loss',\n\t\t\t 'acc', 'mAP', 'r1', 'r5', 'r10', 'loss','acc', 'mAP', 'r1', 'r5', 'r10', 'loss']\n\tsheet.append(titles)\n\tcheck_epochs = [40, 80, 120, 160, 200, 240, 280, 320, 360, epochs]\n\tvalues = []\n\n\tlogger = logging.getLogger('MobileNetReID.train')\n\t\n\t# count parameter\n\tsize = count_parameters(model)\n\tlogger.info(\"the param number of the model is {:.2f} M\".format(size))\n\t\n\tvalues.append(format(size, '.2f'))\n\tvalues.append(model.final_planes)\n\n\tlogger.info(\"Start training\")\n\t\n\t#count = 183, x, y = batch -> 11712 for train\n\tif pretrained:\n\t\tstart_epoch = model.start_epoch\n\n\tif parallel:\n\t\tmodel = nn.DataParallel(model)\n\n\tif use_gpu:\n\t\t# model = nn.DataParallel(model)\n\t\tmodel.to(device)\n\t\n\t# save the best model\n\tbest_mAP, best_r1 = 0., 0.\n\tis_best = False\n\t# batch : img, pid, camid, img_path\n\tavg_loss, avg_acc = RunningAverageMeter(), RunningAverageMeter()\n\tavg_time, global_avg_time = AverageMeter(), AverageMeter()\n\tglobal_avg_time.reset()\n\tfor epoch in range(epochs):\n\t\tscheduler.step()\n\n\t\tif pretrained and epoch < start_epoch - 1:\n\t\t\tcontinue\n\t\n\t\tmodel.train()\n\t\t# sum_loss, sum_acc = 0., 0.\n\t\tavg_loss.reset()\n\t\tavg_acc.reset()\n\t\tavg_time.reset()\n\t\tfor i, batch in enumerate(train_loader):\n\n\t\t\tt0 = time.time()\n\t\t\timgs,labels = batch\n\n\t\t\tif use_gpu:\n\t\t\t\timgs = imgs.to(device)\n\t\t\t\tlabels = labels.to(device)\n\n\t\t\tres = model(imgs)\n\t\t\t# score, feat = model(imgs)\n\t\t\t# loss = loss_func(score, feat, labels)\n\t\t\tloss, acc = compute_loss_acc(use_neck, res, labels, loss_func)\n\t\t\t\n\t\t\tloss.backward()\n\t\t\tif grad_clip != 0:\n\t\t\t\tnn.utils.clip_grad_norm(model.parameters(), grad_clip)\n\n\t\t\toptimizer.step()\n\n\t\t\toptimizer.zero_grad()\n\n\t\t\t# acc = (score.max(1)[1] == labels).float().mean()\n\n\t\t\t# sum_loss += loss\n\t\t\t# sum_acc += acc \n\t\t\tt1 = time.time()\n\t\t\tavg_time.update((t1 - t0) / batch_size)\n\t\t\tavg_loss.update(loss)\n\t\t\tavg_acc.update(acc)\n\n\t\t\t#log the info \n\t\t\tif (i+1) % log_iters == 0:\n\n\t\t\t\tlogger.info(\"epoch {}: {}/{} with loss is {:.5f} and acc is {:.3f}\".format(\n\t\t\t\t\t epoch+1, i+1, batch_num, avg_loss.avg, avg_acc.avg))\n\n\t\tlr = optimizer.state_dict()['param_groups'][0]['lr']\n\t\tlogger.info(\"end epochs {}/{} with lr: {:.5f} and avg_time is {:.3f} ms\".format(epoch+1, epochs, lr, avg_time.avg * 1000))\n\t\tglobal_avg_time.update(avg_time.avg)\n\t\t# change the lr \n\n\t\t# eval the model \n\t\tif (epoch+1) % eval_period == 0 or (epoch + 1) == epochs :\n\t\t\t\n\t\t\tmodel.eval()\n\t\t\tmetrics = R1_mAP(num_query, use_gpu = use_gpu, feat_norm = feat_norm)\n\n\t\t\twith torch.no_grad():\n\n\t\t\t\tfor vi, batch in enumerate(val_loader):\n\t\t\t\t\t\n\t\t\t\t\timgs, labels, camids = batch\n\n\t\t\t\t\tif use_gpu:\n\t\t\t\t\t\timgs = imgs.to(device)\n\n\t\t\t\t\tfeats = model(imgs)\n\t\t\t\t\tmetrics.update((feats,labels, camids))\n\n\t\t\t\t#compute cmc and mAP\n\t\t\t\tcmc, mAP = metrics.compute()\n\t\t\t\tlogger.info(\"validation results at epoch:{}\".format(epoch + 1))\n\t\t\t\tlogger.info(\"mAP:{:.2%}\".format(mAP))\n\t\t\t\tfor r in [1,5,10]:\n\t\t\t\t\tlogger.info(\"CMC curve, Rank-{:<3}:{:.2%}\".format(r,cmc[r-1]))\t\n\n\t\t\t\t# determine whether cur model is the best \n\t\t\t\tif mAP > best_mAP:\n\t\t\t\t\tis_best = True\n\t\t\t\t\tbest_mAP = mAP\n\t\t\t\t\tlogger.info(\"Get a new best mAP\")\n\t\t\t\tif cmc[0] > best_r1:\n\t\t\t\t\tis_best = 
True\n\t\t\t\t\tbest_r1 = cmc[0]\n\t\t\t\t\tlogger.info(\"Get a new best r1\")\n\n\t\t\t\t# add the result to sheet\n\t\t\t\tif (epoch + 1) in check_epochs:\n\t\t\t\t\tval = [avg_acc.avg, mAP, cmc[0], cmc[4], cmc[9]]\n\t\t\t\t\tchange = [format(v * 100, '.2f') for v in val]\n\t\t\t\t\tchange.append(format(avg_loss.avg, '.3f'))\n\t\t\t\t\tvalues.extend(change)\n\n\n\t\t# we hope that eval_period == ckpt_period or eval_period == k* ckpt_period where k is int\t\t\t\n\t\t# whether to save the model\n\t\tif (epoch+1) % ckpt_period == 0 or is_best:\n\n\t\t\tif parallel:\n\t\t\t\ttorch.save(model.module.state_dict(), ckpt_save_path + \"checkpoint_{}.pth\".format(epoch + 1 ))\n\t\t\telse:\n\t\t\t\ttorch.save(model.state_dict(), ckpt_save_path + \"checkpoint_{}.pth\".format(epoch + 1 ))\n\n\t\t\tlogger.info(\"checkpoint {} saved !\".format(epoch + 1))\n\n\t\t\tif is_best:\n\t\t\t\tif parallel:\n\t\t\t\t\ttorch.save(model.module.state_dict(), ckpt_save_path + \"best_ckpt.pth\")\n\t\t\t\telse:\n\t\t\t\t\ttorch.save(model.state_dict(), ckpt_save_path + \"best_ckpt.pth\")\n\t\t\t\tlogger.info(\"best checkpoint was saved\")\n\t\t\t\tis_best = False\n\t\n\tvalues.insert(1, format(global_avg_time.avg * 1000, '.2f'))\n\tsheet.append(values)\n\twb.save(result_path)\n\n\tlogger.info(\"training is end, time for per imgs is {} ms\".format(global_avg_time.avg *1000))", "def train_model(model, loss_fn, optimizer, train_generator, dev_generator):\r\n ########## YOUR CODE HERE ##########\r\n # TODO: Given a model, data, and loss function, you should do the following:\r\n EPOCHS = 20\r\n dev_losses = []\r\n for iepoch in range(EPOCHS): \r\n # TODO: 1) Loop through the whole train dataset performing batch optimization with torch.optim.Adam\r\n for train_batch, train_label in train_generator:\r\n # Zero the gradients\r\n model.zero_grad()\r\n # Compute the loss\r\n loss = loss_fn(model(train_batch),train_label)\r\n # perform a backward pass (backpropagation)\r\n loss.backward()\r\n # Update the parameters\r\n optimizer.step()\r\n\r\n # TODO: 2) Each time you reach the end of the train dataset (one \"epoch\"), calculate the loss on the whole dev set;\r\n dev_loss = 0\r\n for ibatch, ilabel in dev_generator:\r\n dev_loss += loss_fn(model(ibatch), ilabel)\r\n\r\n # TODO: Make sure to print the dev set loss each epoch to stdout.\r\n print(\"Epoch:\", iepoch+1, \", dev loss:\", dev_loss)\r\n dev_losses.append(dev_loss)\r\n\r\n # TODO and 3) stop training and return the model once the development loss stops improving (called early stopping).\r\n if iepoch > 1 and dev_losses[-2]-dev_loss < 0.01:\r\n break\r\n return model", "def train_model(self, train_dataloader, val_dataloader, epochs=20, gpus=0):\n seed()\n trainer = pl.Trainer(gpus=gpus, gradient_clip_val=0.01)\n net = NBeats.from_dataset(self.training, learning_rate=3e-2, weight_decay=1e-2, widths=[32, 512],\n backcast_loss_ratio=0.1)\n # find optimal learning rate\n res = trainer.tuner.lr_find(net, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader,\n min_lr=1e-5)\n net.hparams.learning_rate = res.suggestion()\n\n # Fit the model\n early_stop_callback = EarlyStopping(monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\")\n trainer = pl.Trainer(\n max_epochs=epochs,\n gpus=gpus,\n weights_summary=\"top\",\n gradient_clip_val=0.01,\n callbacks=[early_stop_callback],\n limit_train_batches=30, # TO-DO : need to be dynamic\n )\n\n net = NBeats.from_dataset(\n self.training,\n learning_rate=net.hparams.learning_rate,\n 
log_interval=10,\n log_val_interval=1,\n weight_decay=1e-2,\n widths=[32, 512],\n backcast_loss_ratio=1.0,\n )\n\n trainer.fit(\n net,\n train_dataloaders=train_dataloader,\n val_dataloaders=val_dataloader,\n )\n\n # Get the best model\n best_model_path = trainer.checkpoint_callback.best_model_path\n model_name = best_model_path.split('\\\\')[-1]\n shutil.copy(best_model_path, os.path.join(DIR, 'model'))\n try:\n os.remove(os.path.join(DIR, 'model/nbeats.ckpt'))\n except OSError:\n pass\n os.rename(os.path.join(DIR, 'model/' + model_name), os.path.join(DIR, 'model/nbeats.ckpt'))", "def train_fru(model, epochs=EPOCHS):\n train(model, epochs=epochs, dataset=FRUDataset)", "def train(self, ):\n raise NotImplementedError", "def train_one_epoch(self):\n print('Training......')\n\n # set mode train\n self.network.train()\n\n # prepare data\n train_loss = 0\n transform = transforms.Compose([Rescale(params.rescale_size),\n RandomCrop(params.image_size),\n RandomHorizontalFlip(),\n ToTensor()\n ])\n\n\n\n dataset = Cityscapes(params.dataset_root, mode='train', transforms = transform)\n\n train_loader = DataLoader(dataset,\n batch_size=params.train_batch,\n shuffle=params.shuffle,\n num_workers=params.dataloader_workers)\n \n train_size = 1896\n if train_size % self.params.train_batch != 0:\n total_batch = train_size // self.params.train_batch + 1\n else:\n total_batch = train_size // self.params.train_batch\n recal = 0\n precision = 0\n F_one = 0\n IOU = 0\n accuracy_new = 0 \n # train through dataset\n for batch_idx, batch in enumerate(train_loader):\n self.pb.click(batch_idx, total_batch)\n image, label = batch['image'], batch['label']\n image_cuda, label_cuda = image.cuda(), label.cuda()\n\n # checkpoint split\n if self.params.should_split:\n image_cuda.requires_grad_()\n out = checkpoint_sequential(self.network, self.params.split, image_cuda)\n else:\n out = self.network(image_cuda)\n\n\n loss = self.loss_fn(out, label_cuda)\n \n #display_image(out, label_cuda)\n TP, FP, TN, FN = confusion(out, label_cuda)\n recal = recal+TP\n precision = precision+FP\n F_one = F_one + TN\n IOU = IOU+ FN \n accuracy_final = accuracy(out, label_cuda)\n accuracy_new = accuracy_new + accuracy_final\n\n # optimize\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()\n\n # accumulate\n train_loss += loss.item()\n\n # record first loss\n if self.train_loss == []:\n self.train_loss.append(train_loss)\n self.summary_writer.add_scalar('loss/train_loss', train_loss, 0)\n \n print(\"\\t\")\n print(recal/total_batch, precision/ total_batch, F_one/ total_batch, IOU/ total_batch)\n print(accuracy_new/total_batch)\n \n self.pb.close()\n train_loss /= total_batch\n self.train_loss.append(train_loss)\n\n # add to summary\n self.summary_writer.add_scalar('loss/train_loss', train_loss, self.epoch)", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def entry(self):\n if not os.path.isfile('model'):\n train()\n schedule.every(0.01).seconds.do(predict, self)\n while True:\n schedule.run_pending()", "def train(self, batch):\n pass", "def train(args, data_loader, model, global_stats):\n # Initialize meters + timers\n train_loss = AverageMeter()\n epoch_time = Timer()\n \n for batch_idx, (input_idxs, target_idxs, input_tokens, target_tokens) in enumerate(data_loader):\n # input_idxs and target_idxs have dim (batch_size x max_len)\n # they are NOT sorted by length\n\n lengths = (input_idxs != 0).long().sum(dim=1)\n sorted_lengths, order = torch.sort(lengths, descending=True)\n\n 
input_variable = Variable(input_idxs[order, :][:, :max(lengths)])\n target_variable = Variable(target_idxs[order, :])\n \n model.optimizer.zero_grad()\n output_log_probs, output_ses = model(input_variable,\n list(sorted_lengths),\n targets=target_variable)\n \n batch_size = input_variable.shape[0]\n flattened_outputs = output_log_probs.view(batch_size * model.max_length, -1)\n \n batch_loss = model.citerion(flattened_outputs, target_variable.contiguous().view(-1))\n batch_loss.backward()\n model.optimizer.step()\n \n model.updates += 1\n \n train_loss.update(batch_loss[0], batch_size)\n \n if batch_idx % args.display_iter == 0:\n logger.info('train: Epoch = %d | iter = %d/%d | ' %\n (global_stats['epoch'], batch_idx, len(data_loader)) +\n 'loss = %.2f | elapsed time = %.2f (s)' %\n (train_loss.avg, global_stats['timer'].time()))\n train_loss.reset()\n \n logger.info('train: Epoch %d done. Time for epoch = %.2f (s)' %\n (global_stats['epoch'], epoch_time.time()))\n \n # Checkpoint\n if args.checkpoint:\n model.checkpoint(args.model_file + '.checkpoint',\n global_stats['epoch'] + 1)", "def _train_model(model, X, y, epochs=20):\n\n class PrintDot(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs=None):\n print('.', end='')\n\n early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=2)\n\n # Now actually train the model\n return (model.fit(\n X,\n y,\n epochs=epochs,\n validation_split=0.1,\n verbose=1,\n callbacks=[early_stop, PrintDot()]\n ), model)", "def train_model_epoch(self, epoch_idx, tuning=False):\n acc_loss = 0\n\n num_batch = self.model.config.kg_meta.tot_train_triples // self.config.batch_size if not self.debug else 10\n \n metrics_names = ['acc_loss', 'loss'] \n progress_bar = tf.keras.utils.Progbar(num_batch, stateful_metrics=metrics_names)\n\n for batch_idx in range(num_batch):\n data = list(next(self.generator))\n \n if self.training_strategy == \"projection_based\":\n h = tf.convert_to_tensor(data[0], dtype=tf.int32)\n r = tf.convert_to_tensor(data[1], dtype=tf.int32)\n t = tf.convert_to_tensor(data[2], dtype=tf.int32)\n hr_t = data[3] # tf.convert_to_tensor(data[3], dtype=tf.float32)\n rt_h = data[4] # tf.convert_to_tensor(data[4], dtype=tf.float32)\n loss = self.train_step_projection(h, r, t, hr_t, rt_h)\n elif self.training_strategy == \"pointwise_based\":\n h = tf.convert_to_tensor(data[0], dtype=tf.int32)\n r = tf.convert_to_tensor(data[1], dtype=tf.int32)\n t = tf.convert_to_tensor(data[2], dtype=tf.int32)\n y = tf.convert_to_tensor(data[3], dtype=tf.float32)\n loss = self.train_step_pointwise(h, r, t, y)\n else:\n ph = tf.convert_to_tensor(data[0], dtype=tf.int32)\n pr = tf.convert_to_tensor(data[1], dtype=tf.int32)\n pt = tf.convert_to_tensor(data[2], dtype=tf.int32)\n nh = tf.convert_to_tensor(data[3], dtype=tf.int32)\n nr = tf.convert_to_tensor(data[4], dtype=tf.int32)\n nt = tf.convert_to_tensor(data[5], dtype=tf.int32)\n loss = self.train_step(ph, pr, pt, nh, nr, nt)\n\n acc_loss += loss\n\n if not tuning:\n progress_bar.add(1, values=[('acc_loss', acc_loss), ('loss', loss)])\n\n self.training_results.append([epoch_idx, acc_loss.numpy()])\n\n return acc_loss", "def train(train_data_gen,\n model,\n centers,\n optimizer,\n checkpoint,\n manager,\n epoch,\n nofsteps,\n save_ckpt_freq=100,\n log_freq=25):\n avg_loss = tf.keras.metrics.Mean(name='total_loss', dtype=tf.float32)\n for steps in tqdm(range(nofsteps), desc='Training Epoch {}'.format(epoch)):\n start_time = time.time()\n images, labels = next(train_data_gen)\n metric_loss, 
centers = train_step(images, labels, model, centers, optimizer)\n duration = time.time() - start_time\n # update loss states\n avg_loss.update_state(metric_loss)\n\n # save running speed for every iterations\n tf.summary.scalar('train/learning_rate',\n optimizer.learning_rate(optimizer.iterations),\n step=optimizer.iterations)\n tf.summary.scalar('train/time_per_step',\n duration,\n step=optimizer.iterations)\n\n if tf.equal(optimizer.iterations % log_freq, 0) or tf.equal(optimizer.iterations % save_ckpt_freq, 0) or \\\n tf.equal(optimizer.iterations % (nofsteps - 1), 0):\n tf.summary.scalar('train/loss/total_loss', avg_loss.result(), step=optimizer.iterations)\n checkpoint.step.assign(tf.cast(optimizer.iterations, tf.int32))\n print('\\n Global Step : %s \\t [ Epoch / Step ] %s / %s \\t Training Loss: %f'\n '\\t Learning Rate: %f' % (int(checkpoint.step), epoch, steps + 1, float(metric_loss),\n optimizer.learning_rate(optimizer.iterations).numpy()))\n # save checkpoints\n print(\"Saved Checkpoint for Step {}: {}\".format(\n int(checkpoint.step), manager.save(checkpoint_number=int(checkpoint.step))))\n avg_loss.reset_states()", "def train_epoch(self):\n\n if self._train_data_set is not None and self._train_data_set is not None:\n self._model.fit_num_epochs(self._train_data_set, self._test_data_set)\n else:\n raise RuntimeError(\"[Triggerbot]: No training or test set available\")", "def train(X_train, y_train, save_model='model.h5'):\n \n # Hyperparameters\n batch_size = 32\n epochs = 30\n learning_rate = 0.001\n \n # Loading model from model.py\n model = m(input_height=IMAGE_HEIGHT, input_width=IMAGE_WIDTH)\n \n # Plot model as image\n plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)\n \n # If trained model exist already then load first for further training\n if tf.gfile.Exists(save_model):\n model.load_weights(save_model)\n model.compile(loss='mse', optimizer=Adam(learning_rate))\n \n # Only save model which has best performed on validation set.\n # These are callbacks which are being used in \"model.fit\" call\n earlyStopping = EarlyStopping(monitor='val_loss', patience=5, verbose=1, mode='min')\n mcp_save = ModelCheckpoint('model.h5', save_best_only=True, monitor='val_loss', mode='min')\n reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, epsilon=1e-4, mode='min')\n\n # Train the model\n model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, callbacks=[earlyStopping, mcp_save, reduce_lr_loss], validation_split=0.2, shuffle=True)\n \n return", "def train_model(model, train, validation):\n # Add your code here\n\n monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5, verbose=1, restore_best_weights=True)\n\n model.fit_generator(\n generator=train,\n validation_data=validation,\n epochs=1000,\n callbacks=monitor\n\n )\n # Preprocessing (Enrichment)\n # Preprocessing (Normalisation)\n\n return model", "def train_model(model, train_gen, valid_gen, class_weights=None):\n start_train_time = time.time()\n history = model.fit_generator(\n generator=train_gen, epochs=nnc.NUM_EPOCHS, verbose=nnc.VERBOSE_LVL,\n validation_data=valid_gen, class_weight=class_weights,\n use_multiprocessing=True, workers=nnc.NUM_WORKERS\n )\n end_train_time = time.time()\n\n print(\"Training the model took {} seconds.\".format(\n end_train_time - start_train_time))\n\n return history", "def _train_epoch(self, epoch):\n self.model.train()\n self.train_metrics.reset()\n for batch_idx, (data, target) in 
enumerate(self.data_loader):\n data, target = data.to(self.device), target.to(self.device)\n\n self.optimizer.zero_grad()\n\n if self.config[\"amp\"]:\n # AMP!\n with autocast():\n output = self.model(data)\n loss = self.criterion(output, target)\n else:\n output = self.model(data)\n loss = self.criterion(output, target)\n\n loss.backward()\n self.optimizer.step()\n\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n self.train_metrics.update(\"loss\", loss.item())\n for met in self.metric_ftns:\n self.train_metrics.update(met.__name__, met(output, target))\n\n if batch_idx % self.log_step == 0:\n self.logger.debug(\n \"Train Epoch: {} {} Loss: {:.6f}\".format(\n epoch, self._progress(batch_idx), loss.item()\n )\n )\n self.writer.add_image(\n \"input\", make_grid(data.cpu(), nrow=8, normalize=True)\n )\n\n if batch_idx == self.len_epoch:\n break\n log = self.train_metrics.result()\n\n if self.do_validation:\n val_log = self._valid_epoch(epoch)\n log.update(**{\"val_\" + k: v for k, v in val_log.items()})\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n return log", "def train_epoch(self, data_loader):\n raise NotImplementedError", "def train(model, criterion, optimizer, scheduler, loaders, callbacks, logdir, num_epochs, verbose):\n \n model = model.cuda()\n for i in range(num_epochs):\n bset_loss = 99999999\n custom_train(model, criterion, optimizer, loaders[\"train\"])\n loss = evaluate(model, loaders[\"valid\"])\n with open(f\"{logdir}/logs.txt\", 'a') as f:\n print(f\"loss:{loss} epoch:{i}\", file=f)\n \n if bset_loss >= loss:\n torch.save(model.state_dict(), f\"{logdir}/checkpoints/best.pth\")\n bset_loss = loss", "def setup_training(model, batcher):\r\n train_dir = os.path.join(FLAGS.log_root, \"train\")\r\n if FLAGS.finetune:\r\n if not os.path.exists(train_dir):\r\n print (util.bcolors.OKGREEN + 'Copying See et al. pre-trained model (%s) to (%s) to be fine-tuned' % (os.path.join(FLAGS.pretrained_path, 'train'), train_dir) + util.bcolors.ENDC)\r\n os.makedirs(train_dir)\r\n files = glob.glob(os.path.join(os.path.join(FLAGS.pretrained_path, 'train'), \"*model*\"))\r\n files.extend(glob.glob(os.path.join(os.path.join(FLAGS.pretrained_path, 'train'), \"*checkpoint*\")))\r\n for file in files:\r\n if os.path.isfile(file):\r\n shutil.copy2(file, train_dir)\r\n if not os.path.exists(train_dir): os.makedirs(train_dir)\r\n\r\n model.build_graph() # build the graph\r\n if FLAGS.convert_to_coverage_model:\r\n assert FLAGS.coverage, \"To convert your non-coverage model to a coverage model, run with convert_to_coverage_model=True and coverage=True\"\r\n convert_to_coverage_model()\r\n if FLAGS.restore_best_model:\r\n restore_best_model()\r\n saver = tf.train.Saver(max_to_keep=3) # keep 3 checkpoints at a time\r\n\r\n sv = tf.train.Supervisor(logdir=train_dir,\r\n is_chief=True,\r\n saver=saver,\r\n summary_op=None,\r\n save_summaries_secs=60, # save summaries for tensorboard every 60 secs\r\n save_model_secs=60, # checkpoint every 60 secs\r\n global_step=model.global_step)\r\n summary_writer = sv.summary_writer\r\n logging.info(\"Preparing or waiting for session...\")\r\n sess_context_manager = sv.prepare_or_wait_for_session(config=util.get_config())\r\n logging.info(\"Created session.\")\r\n try:\r\n run_training(model, batcher, sess_context_manager, sv, summary_writer) # this is an infinite loop until interrupted\r\n except KeyboardInterrupt:\r\n logging.info(\"Caught keyboard interrupt on worker. 
Stopping supervisor...\")\r\n sv.stop()", "def set_train(self):\n for m in self.models.values():\n m.train()", "def train(self, epochs=5):\n x_train, y_train, x_test, y_test = self._load_data()\n x_train = tf.keras.utils.normalize(x_train, axis=1) # Scale between 0-1\n x_test = tf.keras.utils.normalize(x_test, axis=1)\n\n model = tf.keras.models.Sequential()\n # 28 x 28 (digits dimensions) -> flat 784\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))\n model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))\n # neurons -> number of classification\n model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))\n\n dtnow = datetime.now().strftime(\"%Y-%m-%dT%H:%M\")\n tb_logs = self._artifact_repo.artifact_path(self._TENSORBOARD_LOGS)\n tensorboard = tf.keras.callbacks.TensorBoard(log_dir='{}/{}'.format(tb_logs, dtnow))\n model.compile(\n optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy']\n )\n model.fit(x_train, y_train, epochs=int(epochs), validation_data=(x_test, y_test), callbacks=[tensorboard])\n\n # val_loss, val_acc = model.evaluate(x_test, y_test)\n\n # self._logger.info(\"Evaluation on test dataset: Loss: %s, Accuracy: %s\", val_loss, val_acc)\n\n path = self._artifact_repo.artifact_path(self._ARTIFACT_MODEL)\n model.save(path)", "def train_model(model, epochs, optimizer, loss_function, train_iterator, valid_iterator):\n for epoch in range(epochs):\n model.train()\n train_loss = 0.0\n train_acc = 0.0\n for i, batch in enumerate(train_iterator):\n (feature, batch_length), label = batch.overview, batch.genre\n batch_length = batch_length.to('cpu')\n label = label.float()\n optimizer.zero_grad()\n\n output = model(feature, batch_length)\n\n loss = loss_function(output, label)\n acc = model_accuracy(output, label)\n\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n train_acc += acc.item()\n print(\n f\"Train:: Epoch: {epoch}, Loss: {train_loss / len(train_iterator)}, Accuracy: {train_acc / len(train_iterator)}\")\n\n model.eval()\n val_loss = 0.0\n val_acc = 0.0\n for i, batch in enumerate(valid_iterator):\n (feature, batch_length), label = batch.overview, batch.genre\n batch_length = batch_length.to('cpu')\n label = label.float()\n\n output = model(feature, batch_length)\n loss = loss_function(output, label)\n acc = model_accuracy(output, label)\n\n val_loss += loss.item()\n val_acc += acc.item()\n\n print(\n f\"Validation:: Epoch: {epoch}, Loss: {val_loss / len(valid_iterator)}, Accuracy: {val_acc / len(valid_iterator)}\")\n print(\"\")", "def train_epoch(self):\r\n for loader in self.loaders:\r\n if self.epoch % loader.epoch_interval == 0:\r\n self.cycle_dataset(loader)\r\n\r\n self._stats_new_epoch()\r\n self._write_tensorboard()\r\n print('{}th epoch train / eval done!'.format(self.epoch))", "def train(self):\n raise NotImplementedError()", "def train_one_epoch(self):\n\t\tself.model.train()\n\t\ttrain_loss = 0\n\n\t\tfor batch_idx, data in enumerate(self.data_loader.train_loader):\n\t\t\tInput = data[0].float().to(self.device)\n\t\t\tOutput = data[1].float().to(self.device)\n\n\t\t\tself.optimizer.zero_grad()\n\t\t\tloss = self.loss(self.model(Input)[:,0],Output)\n\t\t\ttrain_loss += loss.item()\n\t\t\tloss.backward()\n\t\t\tself.optimizer.step()\n\t\t\tself.current_iteration += 1\n\n\t\tself.summary_writer.add_scalar('training/loss', loss.item(), self.current_epoch)", "def train(model, train_loader, epochs, optimizer, loss_fn, device):\n \n # TODO: Paste the train() method 
developed in the notebook here.\n for epoch in range(1, epochs + 1):\n model.train()\n total_loss = 0\n for batch in train_loader: \n batch_X, batch_y = batch\n batch_X = batch_X.unsqueeze(-1)\n batch_X = batch_X.to(device)\n batch_y = batch_y.to(device)\n \n # TODO: Complete this train method to train the model provided.\n model.zero_grad()\n output=model.forward(batch_X)\n loss=loss_fn(output.squeeze(),batch_y)\n loss.backward()\n optimizer.step()\n \n total_loss += loss.data.item()\n if epoch%10 == 0:\n print(\"Epoch: {}, BCELoss: {}\".format(epoch, total_loss / len(train_loader)))", "def run_epoch(self):\n self.model_lr_scheduler.step()\n\n print(\"Training\")\n self.set_train()\n\n for batch_idx, inputs in enumerate(self.train_loader):\n\n before_op_time = time.time()\n\n outputs, losses = self.process_batch(inputs)\n\n self.model_optimizer.zero_grad()\n losses[\"loss\"].backward()\n self.model_optimizer.step()\n\n duration = time.time() - before_op_time\n\n # log less frequently after the first 2000 steps to save time & disk space\n early_phase = self.step < 2000\n late_phase = self.step % 2000 == 0\n\n if early_phase or late_phase:\n self.log_time(batch_idx, duration, losses[\"loss\"].cpu().data)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n self.log(\"train\", inputs, outputs, losses)\n self.val()\n\n self.step += 1" ]
[ "0.7835988", "0.765603", "0.73078525", "0.72385854", "0.71432185", "0.7102481", "0.7094969", "0.70912015", "0.70912015", "0.70912015", "0.70912015", "0.70906556", "0.7049515", "0.70454556", "0.70394707", "0.7019595", "0.69803166", "0.69772995", "0.6968585", "0.69661176", "0.69638526", "0.69461054", "0.6944906", "0.69345975", "0.6929033", "0.6879657", "0.6868651", "0.6841124", "0.6841124", "0.6838421", "0.6836685", "0.68302995", "0.68231577", "0.6820523", "0.68186164", "0.6817993", "0.6814434", "0.6810287", "0.68025327", "0.68025327", "0.68025327", "0.68025327", "0.68025327", "0.67994684", "0.6785885", "0.6777233", "0.6770289", "0.6765026", "0.67408663", "0.6738061", "0.6732515", "0.6724387", "0.67127436", "0.6709154", "0.67078656", "0.67061764", "0.66985214", "0.6693076", "0.6690166", "0.66861993", "0.66852444", "0.66841894", "0.668105", "0.66783303", "0.6675979", "0.66755795", "0.6668213", "0.6663926", "0.66636354", "0.6653556", "0.66509235", "0.664999", "0.6649033", "0.6637098", "0.66336304", "0.6631323", "0.6625983", "0.6625721", "0.6623843", "0.66173095", "0.66148347", "0.66109735", "0.66106117", "0.6608949", "0.66061795", "0.6604943", "0.66014564", "0.6596899", "0.6591244", "0.65905946", "0.65902054", "0.6587898", "0.6586982", "0.65866494", "0.6586475", "0.6582731", "0.65757215", "0.657006", "0.65661705", "0.6563354" ]
0.75473046
2
A function that would stop training per epoch for advanced calculations, plotting, etc.
def iter_epoch(self):
    # set to train mode
    self._set_train()
    # start epoch
    for i, (source, target) in enumerate(self.train_dataset):
        self._batch_iter(source, target, i)
    if self.info:
        print(f"\rEpoch: { self.epoch } | Average loss: { self.epoch_loss.avg }")
    # update epoch and reset the epoch_loss
    self.epoch_loss.reset()
    self.epoch += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def should_stop(self, epoch: int) -> bool:\n return False", "def should_stop(self, epoch: int) -> bool:\n raise NotImplementedError", "def end_training(self):\n self.training = False", "def choose_to_stop_early(self):\n # return self.cumulated_num_tests > 10 # Limit to make 10 predictions\n # return np.random.rand() < self.early_stop_proba\n batch_size = 30 # See ingestion program: D_train.init(batch_size=30, repeat=True)\n num_examples = self.metadata_.size()\n num_epochs = self.cumulated_num_steps * batch_size / num_examples\n return num_epochs > self.num_epochs_we_want_to_train # Train for certain number of epochs then stop", "def stop_fit(self):\n self._stop_fit = True", "def stopEpisode(self):\n if self.episodesSoFar < self.numTraining:\n self.accumTrainRewards += self.episodeRewards\n else:\n self.accumTestRewards += self.episodeRewards\n self.episodesSoFar += 1\n if self.episodesSoFar >= self.numTraining:\n # Take off the training wheels\n self.epsilon = 1.0 # no exploration\n self.lr = 0.0 # no learning", "def stopEpisode(self):\n if self.episodesSoFar < self.numTraining:\n self.accumTrainRewards += self.episodeRewards\n else:\n self.accumTestRewards += self.episodeRewards\n self.episodesSoFar += 1\n if self.episodesSoFar >= self.numTraining:\n # Take off the training wheels\n self.epsilon = 0.0 # no exploration\n self.alpha = 0.0 # no learning", "def stop_criterion(self, es, epoch, logs):\n es.model = self.sdnet.Decomposer\n es.on_epoch_end(epoch, logs)\n if es.stopped_epoch > 0:\n return True", "def stopping_criteria(self):\n stop_training = False\n if self.training_stopping_criteria == \"early_stopping\":\n stop_training = self.early_stopping()\n elif self.training_stopping_criteria == 'SGDR_ensemble':\n T_0 = self.scheduler_params['T_0']\n if self.step_scheduler_after == 'batch':\n T_0 = T_0/self.num_train_iterations\n print(f\"sgdr T_0={T_0}\")\n\n T_mult = self.scheduler_params['T_mult']\n N = self.stopping_criteria_params['N']\n M = self.stopping_criteria_params['M']\n possible_epoch = [T_0*math.pow(T_mult, i) for i in range(N)] \n possible_epoch = np.cumsum(possible_epoch)\n snapshot_epochs = possible_epoch[-M:]\n print(snapshot_epochs)\n\n if self.current_epoch in snapshot_epochs:\n print(\"warm restarted learning rate\")\n print(f\"saving model={self.checkpoint_snapshot} out of {M}\")\n model_path = os.path.join(path_checkpoints_dir, \n f\"{self.experiment_tag}_snapshot_{self.checkpoint_snapshot}_epoch_{self.current_epoch}.pth\")\n self.checkpoint_snapshot += 1\n self.save_checkpoint(model_path=model_path)\n\n if self.current_epoch == possible_epoch[-1]:\n stop_training = True\n\n elif self.training_stopping_criteria == \"max_epoch\":\n if self.current_epoch == self.stopping_criteria_params['max_epoch']: \n stop_training = True\n self.save_checkpoint()\n \n return stop_training", "def on_epoch_end(self):\n if self.start_time is None:\n self.start_time = time.time()\n dt = time.time()-self.start_time\n if self.time_limit < dt:\n self.model.stop_training()\n if self.verbose:\n print('TimeOut after '+str(dt)+'s')", "def check_early_stop(trainer, epochs):\n end_epoch = trainer.updater.get_iterator(\"main\").epoch\n if end_epoch < (epochs - 1):\n logging.warning(\n \"Hit early stop at epoch \"\n + str(end_epoch)\n + \"\\nYou can change the patience or set it to 0 to run all epochs\"\n )", "def get_train_stop_fun(num_iter):\n def train_stop_fun(*args):\n count = args[3]\n return tf.less(count, num_iter)\n return train_stop_fun", "def early_stopping(self, epoch):\n if 
len(self.test_res['score']) == 0:\n return -1, False\n if self.test_res['score'][-1] > self.best_score: # improvement\n self.best_score = self.test_res['score'][-1]\n self.cur_patience = 0\n else:\n self.cur_patience += 1\n\n if self.max_patience == self.cur_patience: # early stop must happen\n best_epoch = epoch - self.max_patience\n return best_epoch, True\n else:\n return epoch, False", "def train_model(self):\n ### Early Stop Mechanism\n loss = previous_loss = float(\"inf\")\n patience_left = self.config.patience\n ### Early Stop Mechanism\n\n self.generator = Generator(self.model.config, training_strategy=self.training_strategy)\n self.evaluator = Evaluator(model=self.model, data_type=self.teston, debug=self.debug)\n\n if self.config.loadFromData:\n self.load_model()\n \n for cur_epoch_idx in range(self.config.epochs):\n print(\"Epoch[%d/%d]\"%(cur_epoch_idx,self.config.epochs))\n loss = self.train_model_epoch(cur_epoch_idx)\n self.test(cur_epoch_idx)\n\n ### Early Stop Mechanism\n ### start to check if the loss is still decreasing after an interval. \n ### Example, if early_stop_epoch == 50, the trainer will check loss every 50 epoche.\n ### TODO: change to support different metrics.\n if ((cur_epoch_idx + 1) % self.config.early_stop_epoch) == 0: \n if patience_left > 0 and previous_loss <= loss:\n patience_left -= 1\n print('%s more chances before the trainer stops the training. (prev_loss, curr_loss): (%.f, %.f)' % \\\n (patience_left, previous_loss, loss))\n\n elif patience_left == 0 and previous_loss <= loss:\n self.evaluator.result_queue.put(Evaluator.TEST_BATCH_EARLY_STOP)\n break\n else:\n patience_left = self.config.patience\n\n previous_loss = loss\n ### Early Stop Mechanism\n\n self.generator.stop()\n self.evaluator.save_training_result(self.training_results)\n self.evaluator.stop()\n\n if self.config.save_model:\n self.save_model()\n\n if self.config.disp_result:\n self.display()\n\n if self.config.disp_summary:\n self.config.summary()\n self.config.summary_hyperparameter(self.model.model_name)\n\n self.export_embeddings()\n\n return loss", "def check_early_stop(self, engine, model_dir, epoch):\n if epoch > 0 and self.eval_engine.n_no_update == 0:\n # save model if previous epoch have already obtained better result\n engine.save_checkpoint(model_dir=model_dir)\n\n if self.eval_engine.n_no_update >= MAX_N_UPDATE:\n # stop training if early stop criterion is triggered\n print(\n \"Early stop criterion triggered, no performance update for {:} times\".format(\n MAX_N_UPDATE\n )\n )\n return True\n return False", "def _maybe_stop_iteration(self, global_step, batch_count):\n if batch_count == self.STOP_BATCH_COUNT_PER_EPOCH:\n warnings.warn(\n \"The memory benchmark runner performs only \"\n + f\"{self.STOP_BATCH_COUNT_PER_EPOCH} steps per epoch.\"\n )\n raise StopIteration", "def stop(self):\n return not self.iteration < self.options['max_iters']", "def stop_training_job(TrainingJobName=None):\n pass", "def stopTestRun(self):", "def testTrainingStop(self):\n # The `train()` function raises a RuntimeError\n with self.assertRaises(RuntimeError):\n train(\n self.params,\n RayDMatrix(self.x, self.y),\n callbacks=[_kill_callback(self.die_lock_file)],\n num_boost_round=20,\n ray_params=RayParams(max_actor_restarts=0, num_actors=2))", "def test_early_stopping_no_val_step(tmpdir):\n\n class CurrentModel(EvalModelTemplate):\n def training_step(self, *args, **kwargs):\n output = super().training_step(*args, **kwargs)\n output.update({'my_train_metric': output['loss']}) # could be 
anything else\n return output\n\n model = CurrentModel()\n model.validation_step = None\n model.val_dataloader = None\n\n stopping = EarlyStopping(monitor='my_train_metric', min_delta=0.1)\n trainer = Trainer(\n default_root_dir=tmpdir,\n early_stop_callback=stopping,\n overfit_pct=0.20,\n max_epochs=5,\n )\n result = trainer.fit(model)\n\n assert result == 1, 'training failed to complete'\n assert trainer.current_epoch < trainer.max_epochs", "def train(self):\n self._stop_gradient = False", "def on_epoch_end(self):\n self.epoch += 1\n metric = self.metric_fn()\n if metric < self.best:\n self.best = metric\n self.count = 0\n else:\n self.count += 1\n if self.count > self.patience:\n self.model.stop_training()\n if self.verbose:\n print(self.name+' after '+str(self.epoch)+' epochs')", "def training_end(self):\n pass", "def stop_run(arn=None):\n pass", "def __call__(self,\n epoch: int,\n step: int,\n performance_measures: Dict,\n context: ModelTrainer,\n validation: bool = False) -> None:\n if step != len(context.train_data_loader) - 1: # only continue at end of epoch\n return\n\n if self.monitor not in performance_measures:\n return\n\n current_loss = performance_measures[self.monitor]\n if (self.last_best - current_loss) >= self.min_delta:\n self.last_best = current_loss\n self.counter = 0\n else:\n self.counter += 1\n\n if self.counter >= self.patience:\n context._stop_training = True # make ModelTrainer stop\n LOGGER.info(f\"Early stopping after epoch {epoch}\")", "def rnn_no_early_stop(dataset):\n model, prefix = make_rnn()\n\n print(\"RNN without early stopping\")\n cross_validate(dataset, model)\n\n print(\"RNN without early stopping, run 2\")\n cross_validate(dataset, model)", "def stopEpisode(self):\n if self.episodesSoFar < self.numTraining:\n self.accumTrainRewards += self.episodeRewards\n else:\n self.accumTestRewards += self.episodeRewards\n self.episodesSoFar += 1\n if self.episodesSoFar >= self.numTraining:\n # Take off the training wheels\n self.epsilon = 0.0 # no exploration\n self.alpha = 0.0 # no learning\n\n print(\"Agent Stop Episode\")\n print(self.episodeRewards)\n for feature, weight in self.weights.iteritems():\n print(\"\\t\" + str(feature) + \" - \" + str(weight))\n self.episodeRewardsList.append(self.episodeRewards)", "def k_fold_keras_early_stop(model,X,y,k=10,epochs=200,batch_size=128,datagen=None,\n random_state=None,name='k-fold-CV',train_at_end=False, patience=3):\n # define k-fold cross validation test \n kfold = StratifiedKFold(n_splits=k, shuffle=True, random_state=random_state)\n # init metrics\n cvscores = []\n episodes = []\n roc_aucs = []\n val_loss = [] \n # save the initialised weights\n tmp_file = 't'+str(time.time()*1000)+'.h5'\n model.save_weights(tmp_file)\n\n ensemble_list = []\n with tqdm(total=k,desc=name.ljust(20)) as pbar:\n for train, val in kfold.split(X, y):\n # Fit the initialised model\n model.load_weights(tmp_file)\n earlyStopping = EarlyStopping(monitor='val_acc',mode='auto',verbose=0, patience=patience)\n if datagen:\n train_generator=datagen.flow(X[train], y[train], batch_size=batch_size)\n training = model.fit_generator(train_generator, validation_data=(X[val], y[val]),\n epochs=epochs, callbacks=[earlyStopping], verbose=0) \n else:\n training = model.fit(X[train], y[train], validation_data=(X[val], y[val]),\n epochs=epochs, batch_size=batch_size, callbacks=[earlyStopping], verbose=0) \n\n # evaluate the model\n scores = model.evaluate(X[val], y[val], verbose=0)\n cvscores.append(scores[1] * 100)\n y_predict = 
model.predict(X[val])\n roc_aucs.append(roc_auc_score(y[val],y_predict))\n episodes.append(len(training.history['val_loss']))\n y_predict = y_predict.astype('float64') # apparently without this log_loss sometimes creates NANs\n val_loss.append(log_loss(y[val],y_predict,eps=1e-15))\n pbar.update(1)\n pbar.set_postfix(Acc=round(np.mean(cvscores),4),vloss=np.mean(val_loss),\n ROC_AUC=np.mean(roc_aucs),Epi=np.mean(episodes))\n if train_at_end:\n model_copy = clone_model(model)\n model_copy.set_weights(model.get_weights())\n ensemble_list.append(model_copy)\n pbar.close()\n os.remove(tmp_file)\n\n if train_at_end:\n ensemble_model = ensemble(ensemble_list)\n print('Created ensemble.')\n return (cvscores, roc_aucs,episodes,val_loss),ensemble_model\n\n else:\n ensemble_model = None\n return (cvscores, roc_aucs,episodes,val_loss)", "def on_train_end(self, logs=None):\n self.epoch_iter = 0", "def test_early_stopping_call_decreasing():\n early_stopping = learning.EarlyStopping(patience=PATIENCE, decreasing=True)\n assert not early_stopping(-1)\n for i in range(PATIENCE):\n assert not early_stopping(i)\n assert early_stopping(0)", "def finish_training(self, error: bool = False, **info):\n pass", "def _end_training(self):\n # Reset this variable as it is reused during evaluation phase\n self.is_filtered = False\n self.eval_config = {}\n \n #close the tf session\n self.sess_train.close()\n \n #set is_fitted to true to indicate that the model fitting is completed\n self.is_fitted = True", "def stop(self):\n stopping = False\n if self.currentGeneration > maxIter:\n stopping = True\n logging.info(\"Maximum Iterations Reached!\")\n return stopping", "def testTrainingStopElastic(self):\n # The `train()` function raises a RuntimeError\n ft_manager = FaultToleranceManager.remote()\n\n ft_manager.schedule_kill.remote(rank=0, boost_round=3)\n ft_manager.schedule_kill.remote(rank=1, boost_round=6)\n ft_manager.delay_return.remote(\n rank=0, start_boost_round=4, end_boost_round=5)\n\n delay_callback = DelayedLoadingCallback(\n ft_manager, reload_data=True, sleep_time=0.1)\n die_callback = DieCallback(ft_manager, training_delay=0.25)\n\n with self.assertRaises(RuntimeError):\n train(\n self.params,\n RayDMatrix(self.x, self.y),\n callbacks=[die_callback],\n num_boost_round=20,\n ray_params=RayParams(\n elastic_training=True,\n max_failed_actors=1,\n max_actor_restarts=1,\n num_actors=2,\n distributed_callbacks=[delay_callback]))", "def stopTest(self, test):", "def auto_stop(loss_list, params):\n\n if len(loss_list) > params['count_loss_num']:\n array_loss = np.array(loss_list)\n # get newest losses\n array_loss = array_loss[-1*params['count_loss_num']:]\n loss_std = np.std(array_loss)\n\n # print('stop_mark=', loss_std, 'num=', params['stop_num'])\n\n # continuely\n # if loss_std occasionally less then stop_mark, and then it rises,\n # so set stop_num = 0\n if params['stop_num'] > 0:\n if loss_std > params['stop_mark']:\n params['stop_num'] = 0\n\n if loss_std < params['stop_mark']:\n params['stop_num'] += 1\n\n # if keep 5 times continuely meet the requirements\n if params['stop_num'] == params['stop_times']:\n return True\n return False", "def _perform_early_stopping_test(self, epoch):\n\n if epoch >= self.early_stopping_params.get('burn_in', DEFAULT_BURN_IN_EARLY_STOPPING) \\\n and epoch % self.early_stopping_params.get('check_interval',\n DEFAULT_CHECK_INTERVAL_EARLY_STOPPING) == 0:\n # compute and store test_loss\n ranks = []\n\n for x_test_triple in self.x_valid:\n rank_triple = 
self.sess_train.run([self.rank], feed_dict={self.X_test_tf: [x_test_triple]})\n ranks.append(rank_triple)\n\n if self.early_stopping_criteria == 'hits10':\n current_test_value = hits_at_n_score(ranks, 10)\n elif self.early_stopping_criteria == 'hits3':\n current_test_value = hits_at_n_score(ranks, 3)\n elif self.early_stopping_criteria == 'hits1':\n current_test_value = hits_at_n_score(ranks, 1)\n elif self.early_stopping_criteria == 'mrr':\n current_test_value = mrr_score(ranks)\n\n if self.early_stopping_best_value >= current_test_value:\n self.early_stopping_stop_counter += 1\n if self.early_stopping_stop_counter == self.early_stopping_params.get('stop_interval',\n DEFAULT_STOP_INTERVAL_EARLY_STOPPING):\n\n # If the best value for the criteria has not changed from initial value then\n # save the model before early stopping\n if self.early_stopping_best_value == INITIAL_EARLY_STOPPING_CRITERIA_VALUE:\n self._save_trained_params()\n\n if self.verbose:\n msg = 'Early stopping at epoch:{}'.format(epoch)\n logger.info(msg)\n print(msg)\n msg = 'Best {}: {:10f}'.format(self.early_stopping_criteria, self.early_stopping_best_value)\n logger.info(msg)\n print(msg)\n return True\n else:\n self.early_stopping_best_value = current_test_value\n self.early_stopping_stop_counter = 0\n self._save_trained_params()\n\n if self.verbose:\n msg = 'Current best:{}'.format(self.early_stopping_best_value)\n logger.info(msg)\n print(msg)\n msg = 'Current:{}'.format(current_test_value)\n logger.info(msg)\n print(msg)\n\n return False", "def on_train_end(self):", "def shutdown_training(self):\n\n self._train_data_set = None\n self._test_data_set = None", "def stop(self, metric, model, metric_type='better_decrease', delta=0.03):\n self.delta = delta\n delta = self.delta * metric\n\n if self.best_score is None:\n self.best_score = metric\n self.save_model_state(metric, model)\n return False\n\n if abs(metric - self.best_score) < self.delta/3 * metric:\n self.num_const_scores += 1\n if self.num_const_scores >= self.wait_epochs + 1:\n print('\\nTraining stoped by EarlyStopping')\n return True\n else:\n self.num_const_scores = 0\n\n if metric_type == 'better_decrease':\n if metric > self.best_score + delta:\n self.num_bad_scores += 1\n elif metric > self.best_score:\n self.num_bad_scores = 0\n else:\n self.best_score = metric\n self.save_model_state(metric, model)\n self.num_bad_scores = 0\n\n else:\n if metric < self.best_score - delta:\n self.num_bad_scores += 1\n elif metric < self.best_score:\n self.num_bad_scores = 0\n else:\n self.best_score = metric\n self.save_model_state(metric, model)\n self.num_bad_scores = 0\n\n if self.num_bad_scores >= self.wait_epochs:\n print('\\nTraining stoped by EarlyStopping')\n return True\n\n\n return False", "def stopping_condition(self, losses):\n\n # if last validation loss is higher than during last three iterations, stop training\n should_stop = True\n for iteration in np.arange(2, 5):\n if losses[-1] < losses[-iteration]:\n should_stop = False\n return should_stop", "def stop(self):\n self._should_run = False", "def train_one_epoch(self):\n raise NotImplementedError", "def epoch_end(self, epoch: int, logs: Dict):\n val = logs.get(self.monitor)\n\n if self.improvement(val=val, best_val=self.best_val):\n\n self.checkpoint(epoch=epoch)", "def on_train_epoch_end(\n self, trainer, pl_module, *args\n ): # pylint: disable=signature-differs,arguments-differ,unused-argument\n # If validation loop is enabled (meaning `validation_step` is overridden),\n # log metrics in 
`on_validaion_epoch_end` to avoid logging the same metrics\n # records twice\n if not trainer.enable_validation:\n self._log_epoch_metrics(trainer, pl_module)", "def tune_model(self):\n acc = 0\n ### Early Stop Mechanism\n loss = previous_loss = float(\"inf\")\n patience_left = self.config.patience\n ### Early Stop Mechanism\n\n self.generator = Generator(self.model.config, training_strategy=self.training_strategy)\n self.evaluator = Evaluator(model=self.model,data_type=self.teston, debug=self.debug, tuning=True)\n \n for cur_epoch_idx in range(self.config.epochs):\n loss = self.train_model_epoch(cur_epoch_idx, tuning=True)\n ### Early Stop Mechanism\n ### start to check if the loss is still decreasing after an interval. \n ### Example, if early_stop_epoch == 50, the trainer will check loss every 50 epoche.\n ### TODO: change to support different metrics.\n if ((cur_epoch_idx + 1) % self.config.early_stop_epoch) == 0: \n if patience_left > 0 and previous_loss <= loss:\n patience_left -= 1\n print('%s more chances before the trainer stops the training. (prev_loss, curr_loss): (%.f, %.f)' % \\\n (patience_left, previous_loss, loss))\n\n elif patience_left == 0 and previous_loss <= loss:\n self.evaluator.result_queue.put(Evaluator.TEST_BATCH_EARLY_STOP)\n break\n else:\n patience_left = self.config.patience\n\n previous_loss = loss\n\n self.generator.stop()\n self.evaluator.test(cur_epoch_idx)\n acc = self.evaluator.output_queue.get()\n self.evaluator.stop()\n\n return acc", "def _ShouldStop(self, task_global_step):\n if task_global_step >= self._task_params.train.max_steps:\n tf.logging.info('ShouldStop: step:%6d params.train.max_steps:%6d',\n task_global_step, self._task_params.train.max_steps)\n return True\n\n return False", "def _ShouldStop(self, task_global_step):\n if task_global_step >= self._task_params.train.max_steps:\n tf.logging.info('ShouldStop: step:%6d params.train.max_steps:%6d',\n task_global_step, self._task_params.train.max_steps)\n return True\n\n return False", "def test_no_val_on_train_epoch_loop_restart(tmpdir):\n trainer_kwargs = {\n \"max_epochs\": 1,\n \"limit_train_batches\": 1,\n \"limit_val_batches\": 1,\n \"num_sanity_val_steps\": 0,\n \"enable_checkpointing\": False,\n }\n trainer = Trainer(**trainer_kwargs)\n model = BoringModel()\n trainer.fit(model)\n ckpt_path = str(tmpdir / \"last.ckpt\")\n trainer.save_checkpoint(ckpt_path)\n\n trainer_kwargs[\"max_epochs\"] = 2\n trainer = Trainer(**trainer_kwargs)\n\n with patch.object(\n trainer.fit_loop.epoch_loop.val_loop, \"advance\", wraps=trainer.fit_loop.epoch_loop.val_loop.advance\n ) as advance_mocked:\n trainer.fit(model, ckpt_path=ckpt_path)\n assert advance_mocked.call_count == 1", "def turn_off_learning(self):\n self.epsilon = 0\n self.alpha = 0", "def on_epoch_end(self, epoch, logs=None):", "def on_epoch_end(self, epoch, logs=None):", "def check_should_stop(performance_history, mode='mean', min_delta=0.03,\n kernel_size=5, k_splits=5):\n if mode == 'mean':\n performance_to_consider = performance_history[:-1]\n thresh = np.mean(performance_to_consider)\n tf.logging.info(\"====Mean====\")\n tf.logging.info(thresh)\n tf.logging.info(performance_history[-1])\n should_stop = performance_history[-1] > thresh\n\n elif mode == 'robust_mean':\n performance_to_consider = performance_history[:-1]\n perc = np.percentile(performance_to_consider, q=[5, 95])\n temp = []\n for perf_val in performance_to_consider:\n if perc[0] < perf_val < perc[1]:\n temp.append(perf_val)\n should_stop = performance_history[-1] > 
np.mean(temp)\n\n elif mode == 'median':\n performance_to_consider = performance_history[:-1]\n should_stop = performance_history[-1] > np.median(\n performance_to_consider)\n\n elif mode == 'generalisation_loss':\n value = compute_generalisation_loss(performance_history)\n should_stop = value > 0.2\n\n elif mode == 'median_smoothing':\n smoothed = median_filter(performance_history[:-1],\n size=kernel_size)\n gradient = np.gradient(smoothed)\n thresholded = np.where(gradient < min_delta, 1, 0)\n value = np.sum(thresholded) * 1.0 / len(gradient)\n should_stop = value < 0.5\n elif mode == 'validation_up':\n remainder = len(performance_history) % k_splits\n performance_to_consider = performance_history[remainder:]\n strips = np.split(np.array(performance_to_consider), k_splits)\n gl_increase = []\n for strip in strips:\n generalisation_loss = compute_generalisation_loss(strip)\n gl_increase.append(generalisation_loss >= min_delta)\n tf.logging.info(\"====Validation_up====\")\n tf.logging.info(gl_increase)\n should_stop = False not in gl_increase\n else:\n raise Exception('Mode: {} provided is not supported'.format(mode))\n return should_stop", "def validation_step(self, *args: Any, **kwargs: Any) -> None:\n batch = args[0]\n batch_idx = args[1]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = self.loss(y_hat, y)\n\n self.log(\"val_loss\", loss, on_step=False, on_epoch=True)\n self.val_metrics(y_hat_hard, y)\n\n if (\n batch_idx < 10\n and hasattr(self.trainer, \"datamodule\")\n and self.logger\n and hasattr(self.logger, \"experiment\")\n and hasattr(self.logger.experiment, \"add_figure\")\n ):\n try:\n datamodule = self.trainer.datamodule\n batch[\"prediction\"] = y_hat_hard\n for key in [\"image\", \"mask\", \"prediction\"]:\n batch[key] = batch[key].cpu()\n sample = unbind_samples(batch)[0]\n fig = datamodule.plot(sample)\n summary_writer = self.logger.experiment\n summary_writer.add_figure(\n f\"image/{batch_idx}\", fig, global_step=self.global_step\n )\n plt.close()\n except ValueError:\n pass", "def test(self):\n self.training = False", "def get_gen_stop_fun(num_iter):\n def gen_stop_fun(*args):\n count = args[2]\n return tf.less(count, num_iter)\n return gen_stop_fun", "def stop(self):\n self._run = False", "def on_epoch_(\n self,\n super_callback: \"AllennlpWandbCallback\",\n trainer: \"GradientDescentTrainer\",\n metrics: Dict[str, Any],\n epoch: int,\n is_primary: bool = True,\n **kwargs: Any,\n ) -> None:\n pass", "def TrainEpoch(ss):\n ss.StopNow = False\n curEpc = ss.TrainEnv.Epoch.Cur\n while True:\n ss.TrainTrial()\n if ss.StopNow or ss.TrainEnv.Epoch.Cur != curEpc:\n break\n ss.Stopped()", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def stop():", "def stop():", "def stop():", "def stop():", "def evaluate(self):\n self.training = False", "def on_epoch(self, e):\n\n dataloader = self.datasetManager.get_dataloader()\n length_dataloader = len(dataloader)\n print(\"Epoch %i\"%e)\n print(\"-\"*15)\n for i, batch in tqdm.tqdm(enumerate(dataloader)):\n index = e*length_dataloader+i\n batch = self.to_device(batch)\n img = batch[0]\n gts = batch[1]\n\n #add filter here !\n\n out = self.network(img)\n loss = self.loss(out, gts)\n \n self.tb_writer.add_scalar('Training Loss', loss, index)\n\n if index % 
self.config['Validation']['validation_step'] == 0:\n \"\"\"\n Validation and saving of the model\n \"\"\"\n # self.tb_writer.add_scalar('Training loss', loss.item(), index)\n with torch.no_grad():\n valid_loss = self.validate(index)\n if valid_loss < self.best_valid_loss:\n self.best_valid_loss = valid_loss\n filename = 'trained_model_iter_%i_loss_%.4f.pth'%(index, valid_loss)\n filename = os.path.join(self.output_dir, 'trained_model', filename)\n self.network.save_model(filename, optimizers=self.opt)\n\n self.backward_and_step(loss) #On appel la backpropagation", "def stop_current_episode(self):\n raise NotImplementedError", "def stop() -> None:", "def stop_check(self):\n pass", "def _train_epoch(self, train_batches, data, max_metric_value, metric_save, patience, step_pbar):\n evaluate = True\n exit_tag = False\n num_steps = self.args.num_steps\n check_point, batch_size = self.args.check_point, self.args.batch_size\n save_dir, save_prefix = self.args.save_dir, self.args.algo\n\n for bitx, batch in enumerate(train_batches):\n if evaluate and self.global_step % self.eval_freq == 0:\n if data.dev_set is not None:\n dev_batches = data.gen_mini_batches('dev', 31928, shuffle=False)\n dev_loss, dev_perplexity, dev_perplexity_at_rank = self.evaluate(dev_batches, data)\n #print('dev loss=%s' % dev_loss, 'dev ppl=%s' % dev_perplexity, 'dev ppl at rank=', dev_perplexity_at_rank)\n\n test_batches = data.gen_mini_batches('test', 41405, shuffle=False)\n test_loss, test_perplexity, test_perplexity_at_rank = self.evaluate(test_batches, data)\n #print('test loss=%s' % test_loss, 'dev ppl=%s' % test_perplexity, 'dev ppl at rank=' , test_perplexity_at_rank)\n\n self.writer.add_scalar(\"dev/loss\", dev_loss, self.global_step)\n self.writer.add_scalar(\"dev/perplexity\", dev_perplexity, self.global_step)\n self.writer.add_scalar(\"test/loss\", test_loss, self.global_step)\n self.writer.add_scalar(\"test/perplexity\", test_perplexity, self.global_step)\n\n for trunc_level in self.trunc_levels:\n ndcg_version1, ndcg_version2 = self.relevance_estimator.evaluate(self, data, self.relevance_queries, trunc_level)\n self.writer.add_scalar(\"NDCG_version1/{}\".format(trunc_level), ndcg_version1, self.global_step)\n self.writer.add_scalar(\"NDCG_version2/{}\".format(trunc_level), ndcg_version2, self.global_step)\n\n if dev_loss < metric_save:\n metric_save = dev_loss\n patience = 0\n else:\n patience += 1\n # Trick: do not decay d_lr help convergence\n if patience >= self.patience:\n #self.adjust_learning_rate(self.discrim_optimizer, self.args.lr_decay)\n self.adjust_learning_rate(self.policy_optimizer, self.args.lr_decay)\n self.g_lr *= self.args.lr_decay\n #self.d_lr *= self.args.lr_decay\n self.writer.add_scalar('train/g_lr', self.g_lr, self.global_step)\n #self.writer.add_scalar('train/d_lr', self.d_lr, self.global_step)\n metric_save = dev_loss\n patience = 0\n self.patience += 1\n else:\n self.logger.warning('No dev set is loaded for evaluation in the dataset!')\n\n self.global_step += 1\n step_pbar.update(1)\n QIDS = Variable(torch.from_numpy(np.array(batch['qids'], dtype=np.int64)))\n UIDS = Variable(torch.from_numpy(np.array(batch['uids'], dtype=np.int64)))\n VIDS = Variable(torch.from_numpy(np.array(batch['vids'], dtype=np.int64)))\n PRE_CLICKS = Variable(torch.from_numpy(np.array(batch['clicks'], dtype=np.int64)[:, :-1]))\n CLICKS = Variable(torch.from_numpy(np.array(batch['clicks'], dtype=np.int64)[:, 1:]))\n\n # generate trajectories\n for __ in range(self.args.d_step):\n actor_rnn_state = 
Variable(torch.zeros(1, QIDS.shape[0], self.gru_hidden_size))\n critic_rnn_state = Variable(torch.zeros(1, QIDS.shape[0], self.gru_hidden_size))\n CLICK_ = torch.zeros(QIDS.shape[0], 1, dtype=CLICKS.dtype)\n logits = torch.zeros(QIDS.shape[0], 0, 2)\n values = torch.zeros(QIDS.shape[0], 0)\n CLICKS_ = Variable(torch.zeros((QIDS.shape[0], 0), dtype=CLICKS.dtype))\n if self.use_cuda:\n QIDS, UIDS, VIDS, PRE_CLICKS, CLICKS = QIDS.cuda(), UIDS.cuda(), VIDS.cuda(), PRE_CLICKS.cuda(), CLICKS.cuda()\n actor_rnn_state, critic_rnn_state, CLICK_ = actor_rnn_state.cuda(), critic_rnn_state.cuda(), CLICK_.cuda()\n logits, values, CLICKS_ = logits.cuda(), values.cuda(), CLICKS_.cuda()\n self.policy.eval()\n for i in range(self.max_d_num + 1):\n logit, value, actor_rnn_state, critic_rnn_state = self.policy(QIDS[:, i:i+1], \n UIDS[:, i:i+1], \n VIDS[:, i:i+1], \n CLICK_, \n actor_rnn_state, \n critic_rnn_state)\n if i > 0:\n CLICK_ = torch.distributions.Categorical(logit).sample()\n logits = torch.cat([logits, logit], dim=1)\n values = torch.cat([values, value], dim=1)\n CLICKS_ = torch.cat([CLICKS_, CLICK_], dim=1)\n\n if self.use_cuda:\n CLICKS_ = torch.cat((torch.zeros((CLICKS_.shape[0], 1), dtype=CLICKS_.dtype, device=torch.device('cuda')), CLICKS_), dim=1)\n else:\n CLICKS_ = torch.cat((torch.zeros((CLICKS_.shape[0], 1), dtype=CLICKS_.dtype), CLICKS_), dim=1)\n\n '''update discriminator'''\n for _ in range(self.args.k):\n self.discrim.train()\n self.discrim_optimizer.zero_grad()\n g_o, _ = self.discrim(QIDS, UIDS, VIDS, CLICKS_)\n g_o_target = torch.ones((QIDS.shape[0], g_o.shape[1]))\n e_o, _ = self.discrim(QIDS, UIDS, VIDS, CLICKS)\n e_o_target = torch.zeros((QIDS.shape[0], e_o.shape[1]))\n if self.use_cuda:\n g_o_target, e_o_target = g_o_target.cuda(), e_o_target.cuda()\n \n discrim_loss = self.discrim_criterion(g_o, g_o_target) + self.discrim_criterion(e_o, e_o_target)\n discrim_loss.backward()\n self.discrim_optimizer.step()\n self.writer.add_scalar('train/d_loss', discrim_loss.data, self.global_step)\n\n '''estimate advantage'''\n with torch.no_grad():\n self.discrim.eval()\n rewards = -torch.log(self.discrim(QIDS, UIDS, VIDS, CLICKS_)[0])\n # print(rewards.shape, values.shape)\n #print(tensor_type)\n #exit(0)\n deltas = torch.zeros(rewards.shape)\n advantages = torch.zeros(rewards.shape)\n prev_value = torch.zeros(rewards.shape[0])\n prev_advantage = torch.zeros(rewards.shape[0])\n if self.use_cuda:\n deltas, advantages = deltas.cuda(), advantages.cuda()\n prev_value, prev_advantage = prev_value.cuda(), prev_advantage.cuda()\n '''print(deltas)\n print(advantages)\n print(prev_value)\n print(prev_advantage)\n exit(0)'''\n\n for i in reversed(range(rewards.size(1))):\n deltas[:, i] = rewards[:, i] + self.gamma * prev_value - values[:, i]\n advantages[:, i] = deltas[:, i] + self.gamma * self.tau * prev_advantage\n prev_value = values[:, i]\n prev_advantage = advantages[:, i]\n\n returns = values + advantages\n advantages = (advantages - advantages.mean()) / (advantages.std() + MINF)\n # advantages = (returns - returns.mean())/returns.std()\n\n fixed_log_probs = torch.distributions.Categorical(logits).log_prob(CLICKS_[:, 1:])\n\n '''PPO update'''\n self.policy.train()\n optim_batchsize = 512\n optim_iter_num = int(math.ceil(QIDS.shape[0] / optim_batchsize))\n if self.use_cuda:\n CLICKS_ = torch.cat((torch.zeros((CLICKS_.shape[0], 1), dtype=CLICKS_.dtype, device=torch.device('cuda')), CLICKS_), dim=1)\n else:\n CLICKS_ = torch.cat((torch.zeros((CLICKS_.shape[0], 1), dtype=CLICKS_.dtype), CLICKS_), 
dim=1)\n for _ in range(self.args.g_step):\n perm = np.arange(QIDS.shape[0])\n np.random.shuffle(perm)\n\n QIDS, UIDS, VIDS, PRE_CLICKS, CLICKS, CLICKS_, advantages, returns, fixed_log_probs = \\\n QIDS[perm].clone(), UIDS[perm].clone(), VIDS[perm].clone(), PRE_CLICKS[perm].clone(), \\\n CLICKS[perm].clone(), CLICKS_[perm].clone(), advantages[perm].clone(), returns[perm].clone(), fixed_log_probs[perm].clone()\n\n #print(QIDS)\n #exit(0)\n\n for i in range(optim_iter_num):\n ind = slice(i * optim_batchsize, min((i + 1) * optim_batchsize, QIDS.shape[0]))\n qids_b, uids_b, vids_b, pclicks_b, clicks_b, clicks__b, advantage_b, returns_b, fixed_log_probs_b = \\\n QIDS[ind], UIDS[ind], VIDS[ind], CLICKS_[ind, :-1], CLICKS[ind], CLICKS_[ind, 2:], \\\n advantages[ind], returns[ind], fixed_log_probs[ind]\n\n logits, values_pred, _, _ = self.policy(qids_b, uids_b, vids_b, pclicks_b)\n dist = torch.distributions.Categorical(logits)\n\n\n '''update critic'''\n value_loss = (values_pred - returns_b).pow(2).mean()\n '''optimizer policy'''\n log_probs_b = dist.log_prob(clicks__b)\n ratio = torch.exp(log_probs_b - fixed_log_probs_b)\n surr1 = ratio * advantage_b\n surr2 = torch.clamp(ratio, 1.0 - self.clip_epsilon, 1.0 + self.clip_epsilon) * advantage_b\n policy_surr = -torch.min(surr1, surr2).mean()\n pe = dist.entropy().mean()\n loss = value_loss + self.alpha * policy_surr - self.beta * pe\n\n self.policy_optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.policy.parameters(), 40)\n self.policy_optimizer.step()\n g_loss, _ = self.compute_loss(logits, clicks_b)\n\n self.writer.add_scalar('train/g_loss', g_loss.data, self.global_step)\n self.writer.add_scalar('train/g_valueloss', value_loss.data, self.global_step)\n self.writer.add_scalar('train/g_policysurr', policy_surr.data, self.global_step)\n self.writer.add_scalar('train/g_entropy', pe.data, self.global_step)\n\n if check_point > 0 and self.global_step % check_point == 0:\n self.save_model(save_dir, save_prefix)\n if self.global_step >= num_steps:\n exit_tag = True\n\n return max_metric_value, exit_tag, metric_save, patience", "def _untrain(self):\n if self.__clf:\n self.__clf._untrain()", "def train(self, training_steps=10):", "def should_evaluate(self, epoch: int) -> bool:\n return False", "def on_train_end(self, logs=None):\n self.end_time = datetime.datetime.now()\n logging.info(f\"Ending training at {self.end_time}\")\n logging.info(f\"Training duration: {self.end_time - self.start_time}\")\n if self.stopped_epoch > 0:\n logging.info(f\"Early stopping at epoch {self.stopped_epoch}\")\n log_metric(\"best_valid_acc\", self.best_valid_acc)\n report_results([dict(\n name=\"valid_acc\",\n type=\"objective\",\n value=-self.best_valid_acc\n )])", "def run_epoch(model, data, optimizer, epoch):\n traindata, valdata = data\n\n model.train()\n train_bpd = epoch_iter(model, traindata, optimizer, epoch)\n\n model.eval()\n val_bpd = epoch_iter(model, valdata, optimizer, epoch)\n\n return train_bpd, val_bpd", "def test_early_stopping_call_increasing():\n early_stopping = learning.EarlyStopping(\n patience=PATIENCE,\n decreasing=False,\n )\n assert not early_stopping(PATIENCE + 1)\n for i in range(PATIENCE):\n assert not early_stopping(i)\n assert early_stopping(0)", "def _evaluate_during_fit(self, test_loader, epoch):", "def run_epoch(model, data, optimizer):\n traindata, valdata = data\n\n model.train()\n train_elbo = epoch_iter(model, traindata, optimizer)\n\n model.eval()\n val_elbo = epoch_iter(model, valdata, optimizer)\n\n 
return train_elbo, val_elbo", "def run_epoch(model, data, optimizer):\n traindata, valdata = data\n\n model.train()\n train_elbo = epoch_iter(model, traindata, optimizer)\n\n model.eval()\n val_elbo = epoch_iter(model, valdata, optimizer)\n\n return train_elbo, val_elbo", "def _stop(self):", "def end_epoch(self, metrics, curves):\n raise NotImplementedError", "def training_epoch_end(self, outputs: Any) -> None:\n self.log_dict(self.train_metrics.compute())\n self.train_metrics.reset()", "def learn(start_epoch, max_epochs, plot_interval, test_interval, checkpoint_interval, delete_checkpoints):\n\tglobal epoch\n\tfor epoch in range(start_epoch, int(max_epochs) + 1):\n\t\tsys.stdout.flush()\n\t\t#while os.path.exists(\"stop.txt\"): # Allows pausing during training\n\t\t#\ttime.sleep(5)\n\t\tstart_time = time.time()\n\t\tcorrect_syn, MAE_syn, avg_dev_syn = model_train(syn_train_loader, False)\n\t\tcorrect_nat, MAE_nat, avg_dev_nat = model_train(nat_train_loader, True)\n\t\tlen_nat = len(nat_train_loader.dataset)\n\t\tlen_syn = len(syn_train_loader.dataset)\n\t\tprint(\"Train Epoch: \" + str(\n\t\t\tepoch) + \"\\tNAT: Dec: {:.3f}\\tKLD: {:.4f}\\tCor: {:.3f}\\tMAE: {:.2f}\\tDEV: {:.3f}\\tRegr: {:.3f}\\t\\tSYN: Dec: {:.3f}\\tKLD: {:.4f}\\tCor: {:.3f}\\tMAE: {:.2f}\\tDEV: {:.3f}\\tRegr: {:.3f}\\ttime: {:.2f}s\"\n\t\t\t .format(decoder_nat_loss / len_nat, KLD_nat_loss / len_nat, correct_nat, MAE_nat, avg_dev_nat, regressor_nat / len_nat,\n\t\t\t\t\t decoder_syn_loss / len_syn, KLD_syn_loss / len_syn, correct_syn, MAE_syn, avg_dev_syn, regressor_syn / len_syn,\n\t\t\t\t\t time.time() - start_time))\n\t\treset_loss_sums()\n\t\tif epoch % test_interval == 0:\n\t\t\tcorrect_syn, MAE_syn = model_test(epoch, False)\n\t\t\tcorrect_nat, MAE_nat = model_test(epoch, True)\n\t\t\tlen_nat = len(nat_test_loader.dataset)\n\t\t\tlen_syn = len(syn_test_loader.dataset)\n\t\t\tprint(\"=> Test Epoch: \" + str(\n\t\t\t\tepoch) + \"\\tDec_nat: {:.3f}\\tKLD_nat: {:.4f}\\tCor_nat: {:.3f}\\tMAE_nat: {:.2f}\\tRegr_nat: {:.3f}\\tDec_syn: {:.3f}\\tKLD_syn: {:.4f}\\tCor_syn: {:.3f}\\tMAE_syn: {:.2f}\\tRegr_syn: {:.3f}\\ttime: {:.2f}s\"\n\t\t\t\t .format(decoder_nat_loss / len_nat, KLD_nat_loss / len_nat, correct_nat, MAE_nat, regressor_nat / len_nat,\n\t\t\t\t\t\t decoder_syn_loss / len_syn, KLD_syn_loss / len_syn, correct_syn, MAE_syn, regressor_syn / len_syn,\n\t\t\t\t\t\t time.time() - start_time))\n\t\t\treset_loss_sums()\n\t\t\trepresent()\n\t\tif epoch % eval_interval == 0:\n\t\t\tevaluate()\n\t\tif epoch % checkpoint_interval == 0:\n\t\t\tsave_log(decoder_nat_log, \"Decoder_nat\")\n\t\t\tsave_log(decoder_syn_log, \"Decoder_syn\")\n\t\t\tsave_log(KLD_nat_log, \"KLD_nat\")\n\t\t\tsave_log(KLD_syn_log, \"KLD_syn\")\n\t\t\tsave_log(regressor_nat_log, \"Regressor_nat\")\n\t\t\tsave_log(regressor_syn_log, \"Regressor_syn\")\n\t\t\tsave_log(correct_nat_log, \"Correct_nat\")\n\t\t\tsave_log(correct_syn_log, \"Correct_syn\")\n\t\t\ttorch.save({\n\t\t\t\t'epoch': epoch + 1,\n\t\t\t\t'state_dict': model.state_dict(),\n\t\t\t\t'optimizer': optimizer.state_dict(),\n\t\t\t}, directory + '/%d.pth' % epoch)\n\t\t\tif delete_checkpoints and epoch > checkpoint_interval: # the first one has to exist already\n\t\t\t\tos.remove(directory + \"/\" + str(epoch - checkpoint_interval) + \".pth\")\n\t\tif epoch % plot_interval == 0:\n\t\t\tplotlyplot.directory = directory\n\t\t\tplotlyplot.createPlots(100, 50, directory)\n\tshowcase()\n\tplaySound()", "def on_end_(\n self,\n super_callback: \"AllennlpWandbCallback\",\n trainer: 
\"GradientDescentTrainer\",\n metrics: Dict[str, Any] = None,\n epoch: int = None,\n is_primary: bool = True,\n **kwargs: Any,\n ) -> None:\n pass", "def train_epoch(model, training_data, optimizer, pred_loss_func, opt):\n\n model.train()\n\n total_event_ll = 0 # cumulative event log-likelihood\n total_time_se = 0 # cumulative time prediction squared-error\n total_time_error = 0 # cumulative time prediction squared-error\n total_time_latitude = 0 # cumulative latitude prediction squared-error\n total_time_longitude = 0 # cumulative longitude prediction squared-error\n total_event_rate = 0 # cumulative number of correct prediction\n total_num_event = 0 # number of total events\n total_num_pred = 0 # number of predictions\n for batch in tqdm(training_data, mininterval=2,\n desc=' - (Training) ', leave=False):\n \"\"\" prepare data \"\"\"\n event_time, time_gap, event_type, latitude, longitude = map(lambda x: x.to(opt.device), batch)\n\n \"\"\" forward \"\"\"\n optimizer.zero_grad()\n\n enc_out, prediction = model(event_type, event_time, latitude, longitude) # change the event_time to time gap\n\n \"\"\" backward \"\"\"\n # negative log-likelihood\n event_ll, non_event_ll = Utils.log_likelihood(model, enc_out, event_time, event_type, latitude, longitude) # change the event_time to time gap\n event_loss = -torch.sum(event_ll - non_event_ll)\n\n # type prediction\n pred_loss, pred_num_event = Utils.type_loss(prediction[0], event_type, pred_loss_func)\n\n # time prediction\n se = Utils.time_loss(prediction[1], event_time) # change the event_time to time gap\n\n # latitude prediction\n le = Utils.time_loss(prediction[2], latitude)\n\n # longitude prediction\n ge = Utils.time_loss(prediction[3], longitude)\n\n # SE is usually large, scale it to stabilize training\n scale_time_loss = 100\n loss = event_loss + pred_loss + se / scale_time_loss + le / scale_time_loss + ge / scale_time_loss\n loss.backward()\n\n \"\"\" update parameters \"\"\"\n optimizer.step()\n\n \"\"\" note keeping \"\"\"\n total_event_ll += -event_loss.item()\n total_time_se += se.item() + le.item() + ge.item()\n total_time_error += se.item()\n total_time_latitude += le.item()\n total_time_longitude += ge.item()\n\n total_event_rate += pred_num_event.item()\n total_num_event += event_type.ne(Constants.PAD).sum().item()\n # we do not predict the first event\n total_num_pred += event_type.ne(Constants.PAD).sum().item() - event_time.shape[0]\n\n total_rmse = np.sqrt(total_time_se / total_num_pred)\n time_rmse = np.sqrt(total_time_error / total_num_pred)\n latitude_rmse = np.sqrt(total_time_latitude / total_num_pred)\n longitude_rmse = np.sqrt(total_time_longitude / total_num_pred)\n print('Time: {:5f} Latitude: {:5f} Longitude: {:5f} Overall: {:5f} '.format(time_rmse, latitude_rmse, longitude_rmse, total_rmse))", "def on_epoch_end(self, epoch: int, logs: dict = None):\n if (epoch + 1) % self._valid_steps == 0:\n val_logs = self._model.evaluate(self._dev_x, self._dev_y,\n self._batch_size)\n if self._verbose:\n print('Validation: ' + ' - '.join(\n f'{k}: {v}' for k, v in val_logs.items()))\n for k, v in val_logs.items():\n logs[k] = v\n if self._model_save_path:\n curr_path = self._model_save_path + str('%d/' % (epoch + 1))\n self._model.save(curr_path)", "def stop(self) -> None:\n ...", "def stop_updating():\n data.stop_updating.set()", "def stop(self, newLoss, model):\n if self.best_model is None:\n self.best_model = model\n if(newLoss > self.lastLoss):\n self.succeedingHigherValues += 1\n else:\n self.succeedingHigherValues = 0\n 
self.__save_model(model)\n\n self.lastLoss = newLoss\n if(self.patience <= self.succeedingHigherValues):\n if self.patience > 1:\n self.best_model = torch.load(\"checkpoint.pth.tar\")\n return True\n else:\n return False", "def stop(self) -> None:", "def stop(self) -> None:", "def trainer(model, X_train, y_train, X_valid, y_valid, config):\n # loop for number of epochs\n # shuffle inputs based off seed\n # need to shuffle validation based off same seed\n # forward prop and get xenloss\n # backprop and update weights\n\n stop_count = config['early_stop_epoch']\n b_size = config[\"batch_size\"]\n stop = config['early_stop']\n\n xnloss = []\n val_loss = [float('inf')]\n test_scores = []\n\n train_accu = []\n valid_accu = []\n\n\n #validation loss increase per epoch counter\n c = -1\n \n for i in range(config[\"epochs\"]):\n np.random.seed(i)\n np.random.shuffle(X_train)\n\n np.random.seed(i)\n np.random.shuffle(y_train)\n\n '''You should average the loss across all mini batches'''\n #means sum up loss from all mini-batches and divide by num_batches\n sums = 0\n\n num_batches = int(X_train.shape[0] / b_size)\n k=0\n for j in range(num_batches):\n # choose minibatch\n x = X_train[j * b_size: (j+1) * b_size]\n targets = y_train[j * b_size: (j+1) * b_size]\n loss, y_pred = model.forward_pass(x, targets)\n loss = loss / (config['batch_size'] * 10) # 10 classes\n sums += loss\n #xnloss.append(loss)\n model.backward_pass()\n k +=1\n # if k < 5 or k > 44:\n # print(targets[0, :])\n # print(y_pred[0, :])\n # print(y_pred[0, :].sum())\n # print(k, '=============')\n\n # mini-batch done here, take avg of loss\n avg_loss = sums / num_batches\n xnloss.append(avg_loss)\n \n ''' epochs loop continues here\n 0) perform validation and compute its (val) loss\n\n 1) calculate test accuracy for every epoch where the\n validation loss is better than the previous validation loss.\n \n 2) Save this result (test score OR loss?) and choose the best \n one when you hit the early stopping criteria.\n\n 3) early stopping - stop training (epochs loop) after 5th consecutive \n increase in validation loss. 
(Experiment with diff values).\n '''\n\n '''VALIDATION PERFORMACE'''\n v_loss, v_pred = model.forward_pass(X_valid, y_valid)\n v_loss_norm = v_loss / (len(X_valid) * 10)\n\n\n '''TEST ACCURACY''' \n #if val loss better (less) than prev: calculate test scores\n \n if v_loss_norm > val_loss[-1]:\n print(\"val loss going up from last time at epoch i=\", i)\n c += 1\n else:\n c = 0\n '''insert code for test accu here'''\n # val_loss.append(v_loss_norm)\n # else: #else val loss increased, so increment counter\n \n val_loss.append(v_loss_norm)\n \n '''EARLY STOPPING'''\n if stop and c == stop_count:\n print(\"early stopped at epoch =\", i+1)\n break\n\n print(val_loss[1:3])\n print(val_loss, len(xnloss), len(val_loss[1:]))\n #outside of epochs loop\n plt.plot(xnloss, label='training loss')\n plt.plot(val_loss[1:], label='validation loss')\n plt.title(\"losses across all epochs\")\n plt.xlabel(\"epochs\")\n plt.ylabel(\"avg loss for the epoch\")\n plt.legend()\n plt.savefig('raised_a.png')\n plt.show()\n #firstplot.png is training loss against # of batches, in 1 epoch\n #avgacrossepochs.png is avg training loss of all batches, across 50 epochs\n # both_losses = []\n \n # for i in range(len(xnloss)):\n # both_losses.append((val_loss[i], xnloss[i]))\n # print(\"validation errors: \", [(val_loss[i], xnloss[i]) for i in range(len(xnloss))])", "def on_epoch_end(epoch, logs):\n global w2vmodel\n global max_seq_length\n global word_to_ix\n global ix_to_word\n global sample\n if epoch % 5 == 0:\n generate_text(model, w2vmodel, epoch, length=75, max_seq_length=max_seq_length,\n seed=\"Rain drop, drop top\\n\")\n return", "def stop(self):", "def stop(self):", "def __call__(self, trainer, epoch):\n if epoch >= self.ignore_before:\n if epoch - self.best_epoch < self.patience:\n if isinstance(self.retain_metric, str):\n current_res = trainer.val_metrics[self.retain_metric][-1]\n else:\n current_res = trainer.val_metrics[self.retain_metric.__name__][-1]\n if self.window is None:\n if self._has_improved(current_res):\n self.best_epoch = epoch\n self.best_res = current_res\n trainer.best_metric = current_res\n trainer.best_model = trainer.model\n else: # window mod\n # get validation metrics in certain window\n try:\n if isinstance(self.retain_metric, str):\n start = len(trainer.val_metrics[self.retain_metric]) - self.window\n start = 0 if start < 0 else start\n\n window_val_metrics = trainer.val_metrics[self.retain_metric][start:]\n else:\n start = len(trainer.val_metrics[self.retain_metric.__name__]) - self.window\n start = 0 if start < 0 else start\n window_val_metrics = trainer.val_metrics[self.retain_metric.__name__][start:]\n except KeyError:\n print(\n \"Couldn't find {} in validation metrics. 
Using \\\n loss instead.\".format(\n self.retain_metric\n )\n )\n start = len(trainer.val_metrics[self.retain_metric]) - self.window\n start = 0 if start < 0 else start\n window_val_metrics = trainer.val_metrics[\"loss\"][start:]\n\n # build mean\n mean_window_res = np.mean(window_val_metrics)\n\n # only safe when improvement to previous epoch detected\n # only a value BETTER than before can be the minimum/maximum of a\n # window with better mean than a previously detected window\n if len(window_val_metrics) == 1 \\\n or self._first_val_better(window_val_metrics[-1], window_val_metrics[-2]) \\\n or self._current_window_save_idx == -1:\n if self._current_window_save_idx == -1:\n self._current_window_save_idx = 0\n self._state_dict_storage[self._current_window_save_idx] = deepcopy(trainer.model.state_dict())\n # increase save idx and take modulo\n self._current_window_save_idx += 1\n self._current_window_save_idx = divmod(self._current_window_save_idx, self.window)[1]\n else: # only increase current_window_save_idx (for modulo index calculation to work)\n self._current_window_save_idx += 1\n self._current_window_save_idx = divmod(self._current_window_save_idx, self.window)[1]\n\n # always update current window best result - it might be at some point overall best result\n current_window_best_idx = self._get_cur_win_best_idx(window_val_metrics)\n if current_window_best_idx == len(window_val_metrics) - 1 \\\n or self._current_window_best_res == -1: # case of improvement or initialisation\n # overwrite model_state saved so far\n self._current_window_best_model_save_idx = self._current_window_save_idx\n self._current_window_best_epoch = epoch\n self._current_window_best_res = window_val_metrics[-1]\n\n # check if mean has improved and copy values as best model result\n if self._has_window_mean_improved(mean_window_res):\n self.best_mean_res = mean_window_res\n self.best_window_start = 0 if epoch - self.window + 1 < 0 else epoch - self.window + 1\n # save current window best as overall best\n self.best_res = self._current_window_best_res\n self.best_model = copy.deepcopy(self._state_dict_storage[self._current_window_best_model_save_idx])\n self.best_epoch = self._current_window_best_epoch\n trainer.best_metric = self._current_window_best_res\n trainer.best_model = trainer.model\n if self.info:\n print(\"Found a window with better validation metric mean:\")\n print(\"\\t metric mean: {}\".format(mean_window_res))\n print(\"\\t epoch start: {}\".format(self.best_window_start))\n print(\"\\t best result: {}\".format(self.best_res))\n\n else:\n # end training run\n trainer.stop_training = True\n if self.window is None:\n print(\"Early stopping at epoch {}.\\nBest model was at epoch {} with val metric score = {}\".format(\n epoch, self.best_epoch, self.best_res)\n )\n else:\n print(\"Early stopping with window mode at epoch {}.\\n\"\n \"Best results were achieved at epoch {} with val metric score = {}.\\n\"\n \"Best window of size {} achieved a mean result of {} and started at epoch {}.\".format(\n epoch, self.best_epoch, self.best_res, self.window, self.best_mean_res, self.best_window_start)\n )", "def on_validation_epoch_end(self, trainer, pl_module):\n self._log_epoch_metrics(trainer, pl_module)" ]
[ "0.7741919", "0.7645692", "0.7291979", "0.7016654", "0.6997903", "0.69406444", "0.6906733", "0.6906342", "0.6895668", "0.68619394", "0.6835608", "0.6817015", "0.6703942", "0.66655344", "0.66614753", "0.6556472", "0.6510778", "0.6507846", "0.6417945", "0.6395246", "0.63713014", "0.6368793", "0.6361399", "0.6336077", "0.63311255", "0.6324123", "0.631357", "0.62945163", "0.6243864", "0.6214102", "0.6187597", "0.6167642", "0.61529183", "0.6112457", "0.61092913", "0.6092925", "0.60896975", "0.6077414", "0.60703063", "0.6066024", "0.6064368", "0.60630953", "0.6050907", "0.603053", "0.6028115", "0.6026669", "0.5986058", "0.59758866", "0.59758866", "0.59741074", "0.59730095", "0.59710854", "0.59710854", "0.59539723", "0.5949345", "0.5949328", "0.59424007", "0.59306365", "0.5924801", "0.59138614", "0.59105974", "0.59105974", "0.59105974", "0.59105974", "0.5908664", "0.5908664", "0.5908664", "0.5908664", "0.58866704", "0.5886023", "0.58704394", "0.5866367", "0.58619493", "0.58619004", "0.58596134", "0.5851546", "0.58474094", "0.58427554", "0.5839534", "0.58388543", "0.58361906", "0.58314484", "0.58314484", "0.5806927", "0.58015114", "0.5787706", "0.57860935", "0.576729", "0.5762107", "0.5751498", "0.5746104", "0.5745841", "0.5745152", "0.5741308", "0.5741308", "0.57230264", "0.57180685", "0.57082826", "0.57082826", "0.5705601", "0.57018465" ]
0.0
-1
An iterator that trains per batch and returns results for advanced calculation, plotting, etc.
def iter_batch(self): # model initialization self._set_train() if not self.batch_process: self.batch_process = self._train_batch() return self.batch_process.__next__() else: try: return self.batch_process.__next__() except StopIteration: # update the state if StopIteration if self.info: print(f"\rEpoch: { self.epoch } | Average loss: { self.epoch_loss.avg }") # update epoch and reset the epoch_loss self.epoch_loss.reset() self.epoch += 1 # reset the batch process del self.batch_process self.batch_process = self._train_batch() return self.batch_process.__next__()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _train_batch(self):\n\n # start epoch\n for i, (source, target) in enumerate(self.train_dataset):\n result = self._batch_iter(source, target, i)\n\n # yield\n yield result", "def __iter__(self):\n for batch in self.iterator:\n yield Batch.from_iterator_batch(batch, self.pad_index, self.sos_index, self.eos_index)", "def __iter__(self) -> Iterator[Batch]:\n return self.get_iterator()", "def __iter__(self):\n batch = []\n for sample in self.dataset:\n batch.append(sample)\n if len(batch) == self.size:\n yield self.transform(batch)\n batch = []\n if batch:\n # the last batch may be less then batch size.\n yield self.transform(batch)", "def next(self):\n if self._curr_batch + 1 > self.num_batches:\n # no more batches in current iteration through data set so start\n # new epoch ready for another pass and indicate iteration is at end\n self.new_epoch()\n raise StopIteration()\n # create an index slice corresponding to current batch number\n batch_slice = slice(self._curr_batch * self.batch_size,\n (self._curr_batch + 1) * self.batch_size)\n inputs_batch = self.inputs[batch_slice]\n targets_batch = self.targets[batch_slice]\n # target_ids_global = self.target_ids[batch_slice]\n target_ids_batch = self.target_ids[batch_slice]\n self._curr_batch += 1\n\n batch_inputs, batch_target_ids, batch_targets = \\\n self.transform_batch(inputs_batch, target_ids_batch, targets_batch)\n\n return batch_inputs, batch_targets, batch_target_ids", "def run_train_iter(self, session, batch, summary_writer):\n # Match up our input data with the placeholders\n input_feed = {}\n input_feed[self.context_ids] = batch.context_ids\n input_feed[self.context_mask] = batch.context_mask\n input_feed[self.qn_ids] = batch.qn_ids\n input_feed[self.qn_mask] = batch.qn_mask\n input_feed[self.ans_ids] = batch.ans_ids\n input_feed[self.ans_mask] = batch.ans_mask\n input_feed[self.keep_prob] = 1.0 - self.FLAGS.dropout # apply dropout\n\n # if not use raw graph tokens\n if not self.FLAGS.use_raw_graph:\n input_feed[self.context_embedding] = batch.context_embeddings\n\n # output_feed contains the things we want to fetch.\n output_feed = [self.updates, self.summaries, self.loss, self.global_step, self.param_norm, self.gradient_norm, self.dev_loss]\n\n # Run the model\n [_, summaries, loss, global_step, param_norm, gradient_norm, dev_loss] = session.run(output_feed, input_feed)\n\n # All summaries in the graph are added to Tensorboard\n summary_writer.add_summary(summaries, global_step)\n\n return loss, global_step, param_norm, gradient_norm, dev_loss", "def __iter__(self):\n batch = []\n for i_batch in range(self.episode_num):\n classes = torch.randperm(len(self.idx_list))[: self.way_num]\n for c in classes:\n idxes = self.idx_list[c.item()]\n pos = torch.randperm(idxes.size(0))[: self.image_num]\n batch.append(idxes[pos])\n if len(batch) == self.episode_size * self.way_num:\n batch = torch.stack(batch).reshape(-1)\n yield batch\n batch = []", "def __iter__(self):\n batch = []\n for idx in self._sampler:\n batch.append(idx)\n if len(batch) == self._batch_size:\n batch = sum(batch, [])\n yield batch\n batch = []\n if len(batch) > 0:\n batch = sum(batch, [])\n yield batch", "def train__iter__(self):\n\n # create worker-specific random number generator\n rng = create_rng_for_worker(self.model.current_epoch)\n\n while True:\n\n # select one file at random (with probability proportional to its annotated duration)\n file, *_ = rng.choices(\n self._train,\n weights=[f[\"duration\"] for f in self._train],\n k=1,\n )\n\n # select one 
annotated region at random (with probability proportional to its duration)\n segment, *_ = rng.choices(\n file[\"annotated\"],\n weights=[s.duration for s in file[\"annotated\"]],\n k=1,\n )\n\n # select one chunk at random (with uniform distribution)\n start_time = rng.uniform(segment.start, segment.end - self.duration)\n chunk = Segment(start_time, start_time + self.duration)\n\n X, one_hot_y, _ = self.prepare_chunk(file, chunk, duration=self.duration)\n\n y = self.prepare_y(one_hot_y)\n\n yield {\"X\": X, \"y\": y}", "def __iter__(self):\n return iter((self.train_stats, self.preprocessed_data, self.output_directory))", "def next(self):\n if self._curr_batch + 1 > self.num_batches:\n # no more batches in current iteration through data set so start\n # new epoch ready for another pass and indicate iteration is at end\n self.new_epoch()\n raise StopIteration()\n # create an index slice corresponding to current batch number\n batch_slice = slice(self._curr_batch * self.batch_size,\n (self._curr_batch + 1) * self.batch_size)\n inputs_batch = self.inputs[batch_slice]\n targets_batch = self.targets[batch_slice]\n self._curr_batch += 1\n return inputs_batch, targets_batch", "def __iter__(self):\n if not self.loading:\n self.reset_loading()\n self.current_batch_index = 0\n return self", "def __iter__(self):\n while True:\n if self.stop:\n return\n for item in self.get_next_batch():\n yield item", "def __call__(self):\n if self.numbatches is None:\n pool = self.pooler()\n if self.batchsize is None:\n self.batchsize = self.pooler.nInPool()\n self.numbatches = self.pooler.nInPool()//self.batchsize\n for i in xrange(self.numbatches):\n pool = self.pooler()\n self._reset_batch()\n if self.samplemethod == 'balance' and len(self.keysamplers)>0:\n batchinds,keyids = self._samplebalanced(pool)\n elif self.samplemethod == 'uniform':\n batchinds,keyids = self._sampleuniform(pool)\n else:\n batchinds,keyids = self._samplesequential(i)\n batch = self._extractInds(pool,batchinds,keyids)\n for k in batch:\n batch[k][np.isnan(batch[k])] = self.nanreplacement\n yield batch", "def next(self):\n\t\t# keep looping until we reach our batch size\n\t\twhile True:\n\t\t\tret = self.get_batch()\n\t\t\tself.index += self.batch_size\n\t\t\tif self.index >= len(self.texts) - self.batch_size:\n\t\t\t\tself.index = 0\n\t\t\tyield ret", "def batch_iter(input_data,batch_size):\r\n batch_ids,batch_mask,batch_segment,batch_label=[],[],[],[]\r\n for features in input_data:\r\n if len(batch_ids) == batch_size:\r\n yield batch_ids,batch_mask,batch_segment,batch_label\r\n batch_ids, batch_mask, batch_segment, batch_label = [], [], [], []\r\n\r\n batch_ids.append(features['input_ids'])\r\n batch_mask.append(features['input_mask'])\r\n batch_segment.append(features['segment_ids'])\r\n batch_label.append(features['label_ids'])\r\n\r\n if len(batch_ids) != 0:\r\n yield batch_ids, batch_mask, batch_segment, batch_label", "def train_batch_iter(self, batch_size, num_epochs):\n return self.batch_iter(0, batch_size, num_epochs)", "def __iter__(self):\n worker_info = torch.utils.data.get_worker_info()\n num_batches = len(self.coords_batcher)\n if worker_info is None:\n # In single-processing mode\n start, end = 0, num_batches\n else:\n worker_id = worker_info.id\n num_workers = worker_info.num_workers\n shard_size = int(np.ceil(num_batches / num_workers))\n start = shard_size * worker_id\n end = min(start + shard_size, num_batches)\n return (self.get_batch(i) for i in range(start, end))", "def iter_epoch(self):\n\n # set to train mode\n 
self._set_train()\n\n # start epoch\n for i, (source, target) in enumerate(self.train_dataset):\n self._batch_iter(source, target, i)\n\n if self.info:\n print(f\"\\rEpoch: { self.epoch } | Average loss: { self.epoch_loss.avg }\")\n\n # update epoch and reset the epoch_loss\n self.epoch_loss.reset()\n self.epoch += 1", "def train(self, num_batches: int):", "def _batch_iter(self, source, target, i: int):\n # send data to device\n source = source.to(self.device)\n target = target.to(self.device)\n\n # the result and loss\n result = self.model(source)\n loss = self.criterion(result, target)\n\n # optimization and backward\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # update the loss\n self.epoch_loss.update(loss.item(), source.size(0))\n\n # print the information\n if self.info:\n print(f\"\\rEpoch: { self.epoch } | Batch: { i } | loss: { self.epoch_loss.avg }\", end=\"\")\n\n # clean the data\n del source, target\n\n return result", "def train_minibatches(self):\n batch_size = self.params['batch_size']\n start_index = 0\n while start_index + batch_size < 500:\n end_index = start_index + batch_size\n yield self.input[start_index:end_index], self.y[start_index:end_index]\n start_index = end_index", "def __iter__(self):\n for batch in self.data:\n batch_size = len(batch)\n X, e1, e2, dist1, dist2, e1_pos, e2_pos, y = list(zip(*batch))\n\n x_len = max(len(x) for x in X)\n x_ids = torch.LongTensor(batch_size, x_len).fill_(0)\n dist1_padded = torch.LongTensor(batch_size, x_len).fill_(0)\n dist2_padded = torch.LongTensor(batch_size, x_len).fill_(0)\n for i, doc in enumerate(X):\n x_ids[i, :len(doc)] = torch.LongTensor(doc)\n\n dist1_padded[i, :len(doc)] = torch.LongTensor(dist1[i])\n dist1_padded[i, len(doc):] = torch.LongTensor([pos(e1_pos[i][1] - idx) for idx, _ in enumerate(x_ids[i][len(doc):], start=len(doc))])\n\n dist2_padded[i, :len(doc)] = torch.LongTensor(dist2[i])\n dist2_padded[i, len(doc):] = torch.LongTensor([pos(e2_pos[i][1] - idx) for idx, _ in enumerate(x_ids[i][len(doc):], start=len(doc))])\n\n e1_tensor = torch.LongTensor(e1)\n e2_tensor = torch.LongTensor(e2)\n\n y_tensor = torch.LongTensor(y)\n\n if self.gpu:\n x_ids = x_ids.pin_memory()\n e1_tensor = e1_tensor.pin_memory()\n e2_tensor = e2_tensor.pin_memory()\n dist1_padded = dist1_padded.pin_memory()\n dist2_padded = dist2_padded.pin_memory()\n y_tensor = y_tensor.pin_memory()\n\n yield (x_ids, e1_tensor, e2_tensor, dist1_padded, dist2_padded, y_tensor)", "def next_batch(self, batch_size):\n raise NotImplementedError", "def batch_data(cls, train_data, train_labels, batch_size):\n for batch in range(int(np.ceil(train_data.shape[0] / batch_size))):\n start = batch_size * batch\n end = start + batch_size\n if end > train_data.shape[0]:\n yield batch, (train_data[start:train_data.shape[0]], \\\n train_labels[start:train_data.shape[0]])\n else:\n yield batch, (train_data[start:end], \\\n train_labels[start:end])", "def batch_generator(batch_size):\n\n # Infinite loop.\n while True:\n # Get a list of random indices for images in the training-set.\n idx = np.random.randint(100,size=batch_size)\n \n # Get the pre-computed transfer-values for those images.\n # These are the outputs of the pre-trained image-model.\n transf_values = np.array([transfer_values[_] for _ in idx])\n\n # For each of the randomly chosen images there are\n # at least 5 captions describing the contents of the image.\n # Select one of those captions at random and get the\n # associated sequence of integer-tokens.\n tokens = 
[caps_markedwords[_] for _ in idx]\n\n # Count the number of tokens in all these token-sequences.\n num_tokens = [len(t) for t in tokens]\n \n # Max number of tokens.\n max_tokens = np.max(num_tokens)\n # Pad all the other token-sequences with zeros\n # so they all have the same length and can be\n # input to the neural network as a numpy array.\n tokens_padded = pad_sequences(tokens,\n maxlen=max_tokens,\n padding='post',\n truncating='post')\n \n # Further prepare the token-sequences.\n # The decoder-part of the neural network\n # will try to map the token-sequences to\n # themselves shifted one time-step.\n decoder_input_data = tokens_padded[:, 0:-1]\n decoder_output_data = tokens_padded[:, 1:]\n\n # Dict for the input-data. Because we have\n # several inputs, we use a named dict to\n # ensure that the data is assigned correctly.\n x_data = \\\n {\n 'decoder_input': decoder_input_data,\n 'transfer_values_input': transf_values\n }\n\n\n # Dict for the output-data.\n y_data = \\\n {\n 'decoder_output': decoder_output_data\n }\n \n yield (x_data, y_data)", "def get_evaluate_batches(data_dir='/home/yunhan/batchified'):\n # train 3 valid 1\n # Use batch 1 - 53 as train (60%), 54 - 71 as validation (20%), 72 - 89 as test (20%)\n n = 18\n idx = np.random.permutation(n)\n idx = idx + 54\n for i in range(n):\n X = np.load(\"%s/X%d.npy\" % (data_dir, idx[i]))/255.\n Y = np.load(\"%s/y%d.npy\" % (data_dir, idx[i])).reshape(-1)\n yield X, Y", "def test_minibatches(self):\n batch_size = self.params['batch_size']\n start_index = 0\n while start_index + batch_size < 500:\n end_index = start_index + batch_size\n yield self.input[start_index:end_index], self.y[start_index:end_index]\n start_index = end_index", "def train_next_batch(self, batch_size=None):", "def train(self, batch):\n pass", "def next(self):\n if self.curr_idx == len(self.idx):\n raise StopIteration\n\n # Fetch the index\n i = self.idx[self.curr_idx]\n self.curr_idx += 1\n\n # Get labels & ids\n labels = self.ndlabels[i:i+self.batch_size]\n users = self.ndusers[i:i + self.batch_size]\n items = self.nditems[i:i + self.batch_size]\n\n # Get feature arrays\n if self.create_batches:\n user_features = self.nduserfeatures[i:i+self.batch_size]\n item_features = self.nditemfeatures[i:i+self.batch_size]\n else:\n # Create user feature arrays\n user_features = mx.ndarray.take(a=self.unique_user_features, indices=users)\n item_features = mx.ndarray.take(a=self.unique_item_features, indices=items)\n\n return mx.io.DataBatch([user_features, item_features], [labels], index = users, pad=0,\n provide_data=[mx.io.DataDesc(name=self.data_names[0], shape=user_features.shape),\n mx.io.DataDesc(name=self.data_names[1], shape=item_features.shape)],\n provide_label=[mx.io.DataDesc(name=self.label_names[0], shape=labels.shape)])", "def next(self):\n # Most batches will be equal to batch_size\n if self.cur < (self.n - self.batch_size):\n # Get positions of files in batch\n positions = self.order[self.cur:self.cur + self.batch_size]\n\n self.cur += self.batch_size\n\n # create Batches\n X_train, y_train, sample_weights = self.createBatches(positions)\n\n return X_train, y_train, sample_weights\n\n # Final batch is smaller than batch_size\n if self.cur < self.n:\n positions = self.order[self.cur::]\n\n # Step is maximum - next will return None\n self.cur = self.n\n\n # Create Batches\n X_train, y_train, sample_weights = self.createBatches(positions)\n\n return X_train, y_train, sample_weights\n\n else:\n # reshuffle order for next batch\n 
np.random.shuffle(self.order)\n\n # Reset cur\n self.cur = 0\n\n # Signal end of epoch\n return None", "def _next(self):\n batch_start, batch_end = self.batch_start, self.batch_start + self.batch_size\n X_batch, y_batch = self.X[batch_start:batch_end], self.y[batch_start:batch_end]\n X_batch, y_batch = self.process_batch(X_batch, y_batch)\n if batch_end > self.X.shape[0]:\n self.batch_start = 0\n else:\n self.batch_start = batch_end\n return X_batch, y_batch", "def data_gen(\n v: int, batch: int, nbatches: int, device: torch.device = torch.device(\"cpu\")\n) -> Iterator[Batch]: # TODO bad name\n for i in range(nbatches):\n data = np.random.randint(1, v, size=(batch, 10))\n data[:, 0] = 1\n src: LongTensorType = torch.from_numpy(data)\n tgt: LongTensorType = torch.from_numpy(data)\n src, tgt = src.to(device), tgt.to(device)\n yield Batch(src, tgt, 0)", "def next(self) -> Iterable[RecordBatch]:\n for batch in self._parent_operator.next():\n args = self._process_arguments(self._arguments, batch=batch)\n yield self._kernel(batch, args)", "def __iter__(self):\n\t\tfor i, data in enumerate(self.dataloader):\n\t\t\tif i * self.opt.batch_size >= self.opt.max_dataset_size:\n\t\t\t\tbreak\n\t\t\tyield data", "def __iter__(self):\n if not self.big:\n yield self.D[:self.P, :self.d], self.labels[:self.P]\n else:\n for i in range(int(self.P / self.batch_size)):\n yield self.D[i*self.batch_size:min(self.P, (i+1)*self.batch_size), :self.d].cuda(), \\\n self.labels[i*self.batch_size:min(self.P, (i+1)*self.batch_size), :].cuda()", "def __iter__(self):\n while True:\n if self.batches is None:\n for indexed_sentence in self.indexed_sentences:\n yield indexed_sentence\n else:\n for batch in self.batches:\n yield batch[:-1, :], batch[1:, :] # Return batch and target indices\n\n if not self.repeat:\n return", "def get_train_iterator(self) -> Iterable[Batch]:\n if self._train_name not in self._datasets:\n raise ValueError(\"Training data not provided.\")\n return self.get_iterator(self._train_name)", "def __iter__(self):\n for i, data in enumerate(self.dataloader):\n if i * self.opt.batch_size >= self.opt.max_dataset_size:\n break\n yield data", "def _yield_training_validation(self, batch_index):\n # print(f'requested batch with index: {batch_index}') # DEBUG\n num_identities = len(self.identities)\n num_ids_to_resample = 0\n # manage identities in a circular way \n ids_start = (batch_index*self.batch_size)%num_identities # identities' batch start\n ids_end = ((batch_index+1)*self.batch_size)%num_identities # identities' batch end\n # Manage the indetities array in a circular manner\n #batch_identities = self.identities[ids_start:ids_end] if ids_start < ids_end else self.identities[ids_start:].append(self.identities[:ids_end])\n if ids_start < ids_end:\n batch_identities = self.identities[ids_start:ids_end]\n else:\n batch_identities = self.identities[ids_start:]\n batch_identities.extend(self.identities[:ids_end])\n samples_batch = []\n labels_batch = []\n roi_batch = []\n for identity in batch_identities:\n identity_data = self.groundtruth_metadata[identity]\n # if there are images available for that identity\n if identity_data['index'] < len(identity_data['metadata']):\n # read the image and the necessary metadata\n img_info = identity_data['metadata'][identity_data['index']]\n img_path = os.path.join(self.dataset_root_path, img_info['path'])\n img = cv2.imread(img_path) # watch out for slashes (/)\n # if OpenCV is unable to read an image, it returns None\n if img is None:\n print('[DATA LOADER ERROR] cannot 
find image at path: ', img_path)\n # increase the index, in order to avoid this path when building subsequent batches with this identity\n identity_data['index'] += 1\n # sample another image from another identity to replace this one in the batch\n num_ids_to_resample += 1\n continue\n #batch.append(AgeEstimationSample(img, img_info['roi'], img_info['age'], 'BGR')) # cv2 reads as BGR\n img = img.astype('float32')\n samples_batch.append(img)\n labels_batch.append(img_info['age'])\n roi_batch.append(img_info['roi'])\n # increase the index, because another sample for that identity has been used\n identity_data['index'] += 1\n else:\n num_ids_to_resample += 1\n\n # if for some identities there weren't available images, take them from other identities\n # note that this mechanism solves also the problems arising when less than batch_size identities are available, by\n # picking multiple images from the available entities\n # the __len__ method in the data generator associated to this data loader is responsible for avoiding that this\n # method is called when less than batch_size \"fresh\" images are available\n last_taken_identity_index = ids_end \n num_samples_when_last_taken = num_ids_to_resample\n while(num_ids_to_resample > 0):\n identity = self.identities[ids_end] # remeber that slicing at previous step excludes upper limit\n identity_data = self.groundtruth_metadata[identity]\n if identity_data['index'] < len(identity_data['metadata']):\n last_taken_identity_index = ids_end\n num_samples_when_last_taken = num_ids_to_resample\n # read the image and the necessary metadata\n img_info = identity_data['metadata'][identity_data['index']]\n img_path = os.path.join(self.dataset_root_path, img_info['path'])\n img = cv2.imread(img_path) # watch out for slashes (/)\n # if the path does not exist or there are problems while reading the image\n if img is None:\n print('[DATA LOADER ERROR] cannot find image at path: ', img_path)\n # increase the index, in order to avoid this path when building subsequent batches with this identity\n identity_data['index'] += 1\n continue\n #batch.append(AgeEstimationSample(img, img_info['roi'], img_info['age'], 'BGR')) # cv2 reads as BGR\n img = img.astype('float32')\n samples_batch.append(img)\n labels_batch.append(img_info['age'])\n roi_batch.append(img_info['roi'])\n\n num_ids_to_resample -= 1\n identity_data['index'] += 1\n \n ids_end = ((ids_end+1)%num_identities)\n if ids_end == last_taken_identity_index and num_ids_to_resample == num_samples_when_last_taken and identity_data['index'] == len(identity_data['metadata']):\n raise Exception(f'No more images available, missing {num_ids_to_resample} images!')\n\n # cannot return numpy arrays since images in batch have different sizes\n return samples_batch, labels_batch, roi_batch\n #return batch", "def __next__(self):\n # Stop iteration once data source has been exhausted\n empty = False\n while not empty:\n try:\n if self.opt.num_buckets > 1:\n label_batch, enc_input_batch, dec_input_batch = self.bucketed_next()\n else:\n label_batch, enc_input_batch, dec_input_batch = self.unbucketed_next()\n if len(enc_input_batch) > 0:\n empty = True\n except IndexError:\n raise StopIteration\n # Apply padding to the obtained batch\n if self.opt.pad:\n label_batch = self.apply_padding(label_batch)\n enc_input_batch = self.apply_padding(enc_input_batch)\n dec_input_batch = self.apply_padding(dec_input_batch)\n # Convert batch lists to numpy arrays\n label_array = np.array(label_batch, dtype=np.int32)\n enc_input_array = 
np.array(enc_input_batch, dtype=np.int32)\n dec_input_array = np.array(dec_input_batch, dtype=np.int32)\n return label_array, enc_input_array, dec_input_array", "def __next__(self) -> Union[None, Tuple[int, Dict[str, Any], Dict[str, Any]]]:\n self.epoch += 1\n self.iter_num += 1\n\n if self.iter_num > 1:\n\n # iterator exhaustion check\n if self.epoch > self.max_epoch:\n raise StopIteration\n\n # exit flag 1, when stop_fn succeeds in train_step or test_step\n if self.stop_fn_flag:\n raise StopIteration\n\n # set policy in train mode\n self.policy.train()\n\n epoch_stat: Dict[str, Any] = dict()\n\n if self.show_progress:\n progress = tqdm.tqdm\n else:\n progress = DummyTqdm\n\n # perform n step_per_epoch\n with progress(\n total=self.step_per_epoch, desc=f\"Epoch #{self.epoch}\", **tqdm_config\n ) as t:\n while t.n < t.total and not self.stop_fn_flag:\n data: Dict[str, Any] = dict()\n result: Dict[str, Any] = dict()\n if self.train_collector is not None:\n data, result, self.stop_fn_flag = self.train_step()\n t.update(result[\"n/st\"])\n if self.stop_fn_flag:\n t.set_postfix(**data)\n break\n else:\n assert self.buffer, \"No train_collector or buffer specified\"\n result[\"n/ep\"] = len(self.buffer)\n result[\"n/st\"] = int(self.gradient_step)\n t.update()\n\n self.policy_update_fn(data, result)\n t.set_postfix(**data)\n\n if t.n <= t.total and not self.stop_fn_flag:\n t.update()\n\n # for offline RL\n if self.train_collector is None:\n self.env_step = self.gradient_step * self.batch_size\n\n if not self.stop_fn_flag:\n self.logger.save_data(\n self.epoch, self.env_step, self.gradient_step, self.save_checkpoint_fn\n )\n # test\n if self.test_collector is not None:\n test_stat, self.stop_fn_flag = self.test_step()\n if not self.is_run:\n epoch_stat.update(test_stat)\n\n if not self.is_run:\n epoch_stat.update({k: v.get() for k, v in self.stat.items()})\n epoch_stat[\"gradient_step\"] = self.gradient_step\n epoch_stat.update(\n {\n \"env_step\": self.env_step,\n \"rew\": self.last_rew,\n \"len\": int(self.last_len),\n \"n/ep\": int(result[\"n/ep\"]),\n \"n/st\": int(result[\"n/st\"]),\n }\n )\n info = gather_info(\n self.start_time, self.train_collector, self.test_collector,\n self.best_reward, self.best_reward_std\n )\n return self.epoch, epoch_stat, info\n else:\n return None", "def __iter__(self):\n for b in self.dl: \n yield to_device(b, self.device) # yield pauses the execution, not store values in memory, forgets about them once iterated\n # no need to remove batch of data from device, done automatically", "def _get_epochs(self, \n n, \n X_train, \n Y_train):\n for i in range(n):\n yield self._get_batch(X_train, Y_train)", "def __next__(self):\n \n if self.current_batch_index >= self.num_batches:\n print(f'[Loader] Stopping Iteration')\n self.stop_loading()\n raise StopIteration\n else:\n # TODO: check processes are working\n print(f' (__next__) Getting BATCH..', end=''); start = time.time()\n batch = self.batch_queue.get()\n print(f' (__next__) Got BATCH! 
({time.time() - start} sec)')\n self.current_batch_index += 1\n return batch", "def next(self):\n #print('next')\n batch_size = self.batch_size\n batch_data = nd.empty((batch_size,)+self.data_shape)\n batch_label = nd.empty((batch_size,)+self.label_shape)\n i = 0\n #self.cutoff = random.randint(800,1280)\n try:\n while i < batch_size:\n #print('N', i)\n data, label, annot = self.next_sample()\n R = self.get_data(data, label, annot)\n if R is None:\n continue\n data_out, label_out, flip_data_out, flip_label_out = R\n if not self.use_coherent:\n data = nd.array(data_out)\n data = nd.transpose(data, axes=(2, 0, 1))\n label = nd.array(label_out)\n #print(data.shape, label.shape)\n batch_data[i][:] = data\n batch_label[i][:] = label\n i += 1\n else:\n data = nd.array(data_out)\n data = nd.transpose(data, axes=(2, 0, 1))\n label = nd.array(label_out)\n data2 = nd.array(flip_data_out)\n data2 = nd.transpose(data2, axes=(2, 0, 1))\n label2 = nd.array(flip_label_out)\n #M = nd.array(M)\n #print(data.shape, label.shape)\n batch_data[i][:] = data\n batch_label[i][:] = label\n #i+=1\n j = i+self.per_batch_size//2\n batch_data[j][:] = data2\n batch_label[j][:] = label2\n i += 1\n if j%self.per_batch_size==self.per_batch_size-1:\n i = j+1\n except StopIteration:\n if i<batch_size:\n raise StopIteration\n\n #return {self.data_name : batch_data,\n # self.label_name : batch_label}\n #print(batch_data.shape, batch_label.shape)\n return mx.io.DataBatch([batch_data], [batch_label], batch_size - i)", "def next_batch(self):\n next_train_index = self.curr_train_index + self.hparams.batch_size\n if next_train_index > self.num_train:\n # Increase epoch number\n epoch = self.epochs + 1\n self.reset()\n self.epochs = epoch\n batched_data = (\n self.train_images[self.curr_train_index:self.curr_train_index +\n self.hparams.batch_size],\n self.train_labels[self.curr_train_index:self.curr_train_index +\n self.hparams.batch_size])\n final_imgs = []\n images, labels = batched_data\n if self.hparams.augment_type == 'mixup':\n images, labels = augmentation_transforms.mixup_batch(\n images, labels, self.hparams.mixup_alpha)\n elif self.hparams.augment_type == 'image_freq':\n images, labels = augmentation_transforms.freq_augment(\n images,\n labels,\n amplitude=self.hparams.freq_augment_amplitude,\n magnitude=self.hparams.augmentation_magnitude,\n proportion_f=self.hparams.freq_augment_ffrac,\n probability=self.hparams.augmentation_probability)\n for data in images:\n if self.hparams.augment_type == 'autoaugment':\n epoch_policy = self.good_policies[np.random.choice(\n len(self.good_policies))]\n final_img = augmentation_transforms.apply_policy(epoch_policy, data)\n elif self.hparams.augment_type == 'random':\n epoch_policy = found_policies.random_policy(\n self.hparams.num_augmentation_layers,\n self.hparams.augmentation_magnitude,\n self.hparams.augmentation_probability)\n final_img = augmentation_transforms.apply_policy(epoch_policy, data)\n else:\n final_img = np.copy(data)\n if self.hparams.apply_flip_crop:\n final_img = augmentation_transforms.random_flip(\n augmentation_transforms.zero_pad_and_crop(data, 4))\n # Apply cutout\n if self.hparams.apply_cutout:\n final_img = augmentation_transforms.cutout_numpy(final_img)\n\n final_imgs.append(final_img)\n final_imgs = np.array(final_imgs, np.float32)\n if self.hparams.noise_type == 'radial':\n labels = augmentation_transforms.add_radial_noise(\n final_imgs, labels, self.hparams.frequency, self.hparams.amplitude,\n self.hparams.noise_class, 
self.hparams.normalize_amplitude)\n elif self.hparams.noise_type == 'random' or self.hparams.noise_type == 'fourier' or self.hparams.noise_type == 'f' or self.hparams.noise_type == '1/f':\n labels = augmentation_transforms.add_sinusoidal_noise(\n final_imgs, labels, self.hparams.frequency, self.hparams.amplitude,\n self.direction, self.hparams.noise_class,\n self.hparams.normalize_amplitude)\n elif self.hparams.noise_type == 'uniform':\n labels = augmentation_transforms.add_uniform_noise(\n labels, self.hparams.amplitude, self.hparams.noise_class)\n\n batched_data = (final_imgs, labels)\n self.curr_train_index += self.hparams.batch_size\n return batched_data", "def next_batch(self):\n if self.ptr + self.batch_size >= self.size:\n head = 0\n tail = self.batch_size\n self.ptr = self.batch_size\n else:\n head = self.ptr\n tail = self.ptr + self.batch_size\n self.ptr += self.batch_size\n return self.train_x[head:tail, 0:self.fig_w**2], self.train_y[head:tail, 0:10]", "def train(self, train_iter_fct, train_steps):\n logger.info('Start training...')\n\n # step = self.optim._step + 1\n step = self.optim._step + 1\n true_batchs = []\n accum = 0\n normalization = 0\n train_iter = train_iter_fct()\n\n total_stats = Statistics()\n report_stats = Statistics()\n self._start_report_manager(start_time=total_stats.start_time)\n\n while step <= train_steps:\n\n reduce_counter = 0\n for i, batch in enumerate(train_iter):\n if self.n_gpu == 0 or (i % self.n_gpu == self.gpu_rank):\n\n true_batchs.append(batch)\n normalization += batch.batch_size\n accum += 1\n if accum == self.grad_accum_count:\n reduce_counter += 1\n if self.n_gpu > 1:\n normalization = sum(distributed\n .all_gather_list\n (normalization))\n\n self._gradient_accumulation(\n true_batchs, normalization, total_stats,\n report_stats)\n report_stats = self._maybe_report_training(\n step, train_steps,\n self.optim.learning_rate,\n report_stats)\n\n true_batchs = []\n accum = 0\n normalization = 0\n if step % self.save_checkpoint_steps == 0 and self.gpu_rank == 0:\n self._save(step)\n\n step += 1\n if step > train_steps:\n break\n train_iter = train_iter_fct()\n\n return total_stats", "def generator(self):\n\n # Each thread gets its own randomized set of keys\n keys = self.loader.keys()\n\n while True:\n random.shuffle(keys)\n data_batch = []\n label_batch = []\n\n for key in keys:\n data = self.loader.get(key)\n s = StringIO(data)\n img = PIL.Image.open(s)\n img = img.resize((224, 224))\n img = img.convert('RGB')\n data_batch.append(np.array(img))\n\n label_str = self._classname_from_key(key)\n label_int = self._classname_to_label[label_str]\n label_arr = np.zeros(self.num_classes())\n label_arr[label_int] = 1 # one-hot encoding\n label_batch.append(label_arr)\n\n if len(data_batch) == 32: # batch size\n yield np.array(data_batch), np.array(label_batch)\n data_batch = []\n label_batch = []", "def iterate(self, batch_size=8, func=None):\n raise NotImplementedError()", "def __iter__(self):\n self.current_epoch = self.current_epoch + 1\n self.num_samples_yield = -1\n return self", "def __iter__(self):\n self.current_epoch = self.current_epoch + 1\n self.num_samples_yield = -1\n return self", "def inference_generator(self):\r\n self.initialize_if_not(training=False)\r\n self.checkpoint.load_all() # Load available weights\r\n\r\n # TODO: Make more generic by not picking first source\r\n data_source = next(iter(self._train_data.values()))\r\n while True:\r\n fetches = dict(self.output_tensors['train'], **data_source.output_tensors)\r\n start_time = 
time.time()\r\n outputs = self._tensorflow_session.run(\r\n fetches=fetches,\r\n feed_dict={\r\n self.is_training: False,\r\n self.use_batch_statistics: True,\r\n },\r\n )\r\n outputs['inference_time'] = 1e3*(time.time() - start_time)\r\n yield outputs", "def _next(self):\n batch_start, batch_end = self.batch_start, self.batch_start + self.batch_size\n if batch_end > self.X.shape[0]:\n self.shuffle()\n return self._next()\n else:\n batch_indices = self.indices[batch_start:batch_end]\n X_batch, y_batch = self.X[batch_indices], self.y[batch_indices]\n X_batch, y_batch = self.process_batch(X_batch, y_batch)\n self.batch_start = batch_end\n return X_batch, y_batch", "def next_batch(self, batch_size):\r\n raise NotImplementedError", "def run_batch(self):\n\n print(\"Running experiment with batch_size {}...\".format(self.batch_size))\n\n errors = self.W.new_tensor([])\n energies = self.W.new_tensor([])\n\n x = (2 * self.W.new(self.N, self.batch_size).random_(2) - 1).float() # Initialize the x vector\n rands = self.W.new(self.t_max, self.batch_size).uniform_() # The random values which will be compared to the acceptance probabilities\n idxs = self.W.new(self.t_max, self.batch_size).random_(self.N).long() # The indices which will be flipped in x at each iteration\n\n energy, wx = utils.compute_energy_batch(x, self.W, self.Y) # Compute the initial value of the energy\n\n for iteration in range(self.t_max):\n self.beta_scheduler.step(energies) # Update the value of beta according to the cooling strategy\n\n x, energy, wx = self.chain.step_batch(x, self.W, self.Y, self.beta_scheduler.beta, energy, wx, idxs[iteration], rands[iteration])\n energies = torch.cat((energies, energy.unsqueeze(0)))\n\n e = utils.compute_reconstruction_error_batch(x, self.X) # Compute the current reconstruction error\n errors = torch.cat((errors, e.unsqueeze(0)))\n\n return errors, energies, x", "def batch_iter(data, labels, lengths, batch_size, num_epochs):\n assert len(data) == len(labels) == len(lengths)\n # print(f'The length of the data: {len(data)} input samples')\n\n data_size = len(data)\n epoch_length = int(data_size / batch_size)\n # print(f'Total number of batches per epoch: {epoch_length}')\n\n for _ in range(num_epochs):\n for batch_num in range(epoch_length):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n\n xdata = data[start_index: end_index]\n ydata = labels[start_index: end_index]\n ldata = lengths[start_index: end_index]\n\n yield xdata, ydata, ldata", "def __iter__(self) -> Union[Iterator[int], Iterator[Tuple[int, Any]]]:\n self.size = self._data._dataset_size\n if (not self._data._fully_cached or\n self._data._should_call_prefetch_source):\n self._data._start_iteration()\n # First epoch of lazy loading, calling prefetch, and returning\n # indices and examples.\n iterator = self._iterator_unknown_size()\n else:\n # Non-lazy loading, or when dataset has been fully iterated.\n assert self.size is not None\n iterator = self._iterator_given_size(self.size)\n\n if self._data._should_call_prefetch_processed:\n # Processing routine is performed in main process. 
Yield\n # processed examples instead.\n map_fn = lambda idx: (idx, self._data._processed_cache[idx])\n elif self._data._should_yield_raw_example:\n # Return indices and examples for any epoch in this case.\n map_fn = lambda idx: (idx, self._data._source[idx])\n else:\n map_fn = None # type: ignore\n if map_fn is not None:\n return map(map_fn, iterator)\n\n return iterator", "def ptb_iterator(raw_data, batch_size, num_steps):\n raw_data = np.array(raw_data, dtype=np.int32)\n\n data_len = len(raw_data)\n batch_len = data_len // batch_size\n data = np.zeros([batch_size, batch_len], dtype=np.int32)\n for i in range(batch_size):\n data[i] = raw_data[batch_len * i:batch_len * (i + 1)]\n\n epoch_size = (batch_len - 1) // num_steps\n\n if epoch_size == 0:\n raise ValueError(\"epoch_size == 0, decrease batch_size or num_steps\")\n\n for i in range(epoch_size):\n x = data[:, i*num_steps:(i+1)*num_steps]\n y = data[:, i*num_steps+1:(i+1)*num_steps+1]\n yield (x, y)", "def batch_iter(data, batch_size):\n random.shuffle(data)\n num_data = len(data)\n num_steps = int(math.ceil(num_data/float(batch_size)))\n rem = num_data%batch_size\n for step in range(num_steps):\n if step == (num_steps-1) and rem !=0:\n data_batch = data[(step)*batch_size:]\n random.shuffle(data)\n data_batch.extend(data[:(batch_size - rem)])\n board, target, ko = process_mini_batch(data_batch)\n yield (board, target, ko)\n else:\n data_batch = data[step*batch_size:(step+1)*batch_size]\n board, target, ko = process_mini_batch(data_batch)\n yield (board, target, ko)", "def __iter__(self) -> Iterator[Tensor]:\n yield from self.tensor", "def train_epoch(self):\n for it in range(self.iter_per_epoch):\n # Get batch\n xs, _ = self.mnist.train.next_batch(100)\n _, loss, summary = self.sess.run([self.train_op, self.loss, self.summary_op],\n {self.x: xs})\n self.summary_writer.add_summary(summary, it)\n if it % 1000 == 0:\n print('Iteration {}\\t loss: {}'.format(it, loss))", "def get_batch(self, batch_shape, use='train', val_set=None, cell_type=None, verbose=True):\n self.batch_size = batch_shape[0]\n self.data = np.zeros(batch_shape)\n self.labels = np.zeros([self.batch_size, 2] if self.use_softmax else self.batch_size)\n\n files = self.get_data_files(use=use, val_set=val_set, cell_type=cell_type)\n\n random_file_idxs = np.arange(len(files))\n np.random.shuffle(random_file_idxs)\n\n i = 0\n # num_negatives = 0\n progress = 0\n for count, idx in enumerate(random_file_idxs):\n if verbose and float(count)/len(random_file_idxs) >= progress + 0.05:\n progress += 0.05\n print str(int(round(progress * 100))) + \"%\",\n sys.stdout.flush()\n if abs(progress - 0.95) <= 0.01:\n print \"\"\n f = files[idx]\n d, l = self.load_point_cloud(f)\n d = self.format_point_cloud(d, batch_shape[1])\n self.data[i] = d\n self.labels[i] = l\n\n i += 1\n if i >= self.batch_size:\n # Augment batched point clouds by rotation and jittering\n # if use == 'train':\n # self.data = PointNetDataHandler.rotate_point_cloud(self.data)\n # self.data = PointNetDataHandler.jitter_point_cloud(self.data)\n # Yield batch\n yield self.data, self.labels\n i = 0\n # num_negatives = 0", "def gen_batch(self):\n batch_size = self.batch_size\n shuffle = self.shuffle\n data = np.array(self.sentences)\n\n data_size = len(data)\n num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1\n while True:\n # shuffle the data at starting of each epoch\n shuffled_data = data\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = data[shuffle_indices]\n \n 
for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n yield self._format_samples(shuffled_data[start_index:end_index], self.max_length)\n\n if self.mode in ['train', \"pred\"]:\n break", "def get_batch(iterator, batch_size):\n while True:\n center_batch = np.zeros(batch_size, dtype = np.uint32)\n target_batch = np.zeros((batch_size, 1), dtype = np.uint32)\n for index in range(batch_size):\n center_batch[index], target_batch[index] = next(iterator)\n\n yield center_batch, target_batch", "def batch_iter_test(x, batch_size):\n data_size = len(x)\n num_batches_per_epoch = int(data_size/batch_size) + 1\n \n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n x_batch = x[start_index:end_index]\n yield x_batch", "def nn_batch_generator(self, x_train):\n # Shuffle the batch\n np.random.seed(self.seed)\n shuffle_index = np.arange(np.shape(x_train)[0])\n np.random.shuffle(shuffle_index)\n x = x_train[shuffle_index, :]\n y = x_train[shuffle_index, :]\n\n # Iterate until making a full epoch\n counter = 0\n while 1:\n index_batch = shuffle_index[\n self.batch_size * counter : self.batch_size * (counter + 1)\n ]\n # Decompress batch\n x_batch = x[index_batch, :]\n y_batch = y[index_batch, :]\n counter += 1\n yield (np.array(x_batch), np.array(y_batch))\n\n # Stopping rule\n if counter >= self.number_of_batches:\n counter = 0", "def make_data_iterator(dataset, batch_size):\n \n if dataset == '8gaussians':\n scale = 2.\n centers = [\n (1, 0),\n (-1, 0),\n (0, 1),\n (0, -1),\n (1. / np.sqrt(2), 1. / np.sqrt(2)),\n (1. / np.sqrt(2), -1. / np.sqrt(2)),\n (-1. / np.sqrt(2), 1. / np.sqrt(2)),\n (-1. / np.sqrt(2), -1. 
/ np.sqrt(2))\n ]\n centers = [(scale * x, scale * y) for x, y in centers]\n while True:\n dataset = []\n for i in range(batch_size):\n point = np.random.randn(2) * .2\n center = random.choice(centers)\n point[0] += center[0]\n point[1] += center[1]\n dataset.append(point)\n dataset = torch.Tensor(dataset)\n dataset /= 1.414 # stdev\n yield dataset\n \n elif dataset == 'sine':\n while True:\n noise = 0.2\n x = torch.linspace(-4, 4, batch_size, dtype=torch.float32)\n y = np.sin(x) + noise*np.random.randn(*x.shape)\n yield torch.stack([x, y], dim=1)\n \n elif dataset == 'heteroscedastic':\n theta = torch.linspace(0, 2, batch_size)\n x = np.exp(theta)*np.tan(0.1*theta)\n while True:\n b = (0.001 + 0.5 * np.abs(x)) * np.random.normal(1, 1, batch_size)\n y = np.exp(theta)*np.sin(0.1*theta) + b\n yield torch.stack([x, y], dim=1)\n \n elif dataset == 'moon':\n noise = 0.1\n while True:\n data, _ = sklearn.datasets.make_moons(n_samples=batch_size,\n noise=noise)\n yield torch.Tensor(data)\n \n elif dataset == 'helix':\n noise = 0.2\n while True:\n t = torch.linspace(0, 20, batch_size)\n x = np.cos(t)\n x2 = np.sin(t) + noise * np.random.randn(*x.shape)\n \n yield torch.stack([x, x2, t], dim=1)\n \n elif dataset == 'circle':\n while True:\n t = np.random.random(batch_size) * 2 * np.pi - np.pi\n length = 1 - np.random.random(batch_size)*0.4\n x = torch.Tensor(np.multiply(np.cos(t), length))\n y = torch.Tensor(np.multiply(np.sin(t), length))\n \n yield torch.stack([x, y], dim=1)\n\n elif dataset == '2spirals':\n while True:\n z = torch.randn(batch_size, 2)\n n = torch.sqrt(torch.rand(batch_size // 2)) * 540 * (2 * math.pi) / 360\n d1x = - torch.cos(n) * n + torch.rand(batch_size // 2) * 0.5\n d1y = torch.sin(n) * n + torch.rand(batch_size // 2) * 0.5\n x = torch.cat([torch.stack([ d1x, d1y], dim=1),\n torch.stack([-d1x, -d1y], dim=1)], dim=0) / 3\n yield x + 0.1*z", "def get_one_shot_iterator(self):\n\n files = self._get_all_files()\n\n dataset = (\n tf.data.TFRecordDataset(files, num_parallel_reads=self.num_readers)\n .map(self._parse_function, num_parallel_calls=self.num_readers)\n .map(self._preprocess_image, num_parallel_calls=self.num_readers))\n\n if self.should_shuffle:\n dataset = dataset.shuffle(buffer_size=100)\n\n if self.should_repeat:\n dataset = dataset.repeat() # Repeat forever for training.\n else:\n dataset = dataset.repeat(1)\n\n dataset = dataset.batch(self.batch_size).prefetch(self.batch_size)\n return dataset.make_one_shot_iterator()", "def _batch_iter(line_iter, n_objects, batchsize, dtype):\n new_dt = np.dtype(dtype.descr + [('mask',bool), ('weight', float)])\n batch = np.zeros((batchsize, n_objects), dtype=new_dt)\n sample_n = 0\n for array, wt in line_iter:\n\n n_missing = n_objects - array.size\n\n array.resize(n_objects, refcheck=False)\n batch[sample_n, :] = array\n batch['weight'][sample_n, :] = wt\n if n_missing > 0:\n batch['mask'][sample_n, -n_missing:] = True\n\n sample_n += 1\n if sample_n == batchsize:\n yield batch\n sample_n = 0\n batch.fill(0)\n yield batch[:sample_n, ...]", "def _get_train_generator(self):\n while(True):\n random.shuffle(self.train)\n for data_element in self.train:\n if self.debug: \n print(\"training on: {}\".format(data_element))\n\n image, heatmap = self._generate_input_tuple(data_element)\n\n if self.debug: \n print(\"yields: {}\".format(data_element))\n\n yield (image, heatmap)", "def enhancer_iterator(self, data, labels, batch_size, num_steps):\n def seq_to_ints(seq):\n return [self.vocab.word_to_index[c] for c in seq]\n\n # Map raw data 
to array of ints. if all sequences are the same length L, \n # raw_data will be N-by-L\n mdata = np.array([seq_to_ints(i) for i in data], dtype=np.int32)\n num_batches = len(mdata) // batch_size\n \n # data will have batch_len elements, each of size batch_size\n # ASSUME FIXED SEQUENCE LENGTHS OFF 1000 FOR NOW (5/20/16)\n # Just grab middle self.config.num_steps nucleotides\n a = int(len(mdata[0,:])/2-self.config.num_steps/2)\n b = int(len(mdata[0,:])/2+self.config.num_steps/2)\n for i in range(num_batches):\n x = mdata[batch_size*i:batch_size*(i+1),a:b]\n if labels is not None:\n y = labels[batch_size*i:batch_size*(i+1)]\n else:\n y = None\n yield(x,y)", "def self_play_iterator_creator(hparams, num_workers, jobid):\n vocab_table = vocab_utils.create_vocab_tables(hparams.vocab_file)[0]\n data_dataset = tf.data.TextLineDataset(hparams.train_data)\n kb_dataset = tf.data.TextLineDataset(hparams.train_kb)\n skip_count_placeholder = tf.placeholder(shape=(), dtype=tf.int64)\n # this is the actual iterator for supervised training\n train_iterator = iterator_utils.get_iterator(\n data_dataset,\n kb_dataset,\n vocab_table,\n batch_size=hparams.batch_size,\n t1=hparams.t1.encode(),\n t2=hparams.t2.encode(),\n eod=hparams.eod,\n len_action=hparams.len_action,\n random_seed=hparams.random_seed,\n num_buckets=hparams.num_buckets,\n max_dialogue_len=hparams.max_dialogue_len,\n skip_count=skip_count_placeholder,\n num_shards=num_workers,\n shard_index=jobid)\n\n # this is the actual iterator for self_play_fulltext_iterator\n data_placeholder = tf.placeholder(\n shape=[None], dtype=tf.string, name=\"src_ph\")\n kb_placeholder = tf.placeholder(shape=[None], dtype=tf.string, name=\"kb_ph\")\n batch_size_placeholder = tf.placeholder(\n shape=[], dtype=tf.int64, name=\"bs_ph\")\n\n dataset_data = tf.data.Dataset.from_tensor_slices(data_placeholder)\n kb_dataset = tf.data.Dataset.from_tensor_slices(kb_placeholder)\n\n self_play_fulltext_iterator = iterator_utils.get_infer_iterator(\n dataset_data,\n kb_dataset,\n vocab_table,\n batch_size=batch_size_placeholder,\n eod=hparams.eod,\n len_action=hparams.len_action,\n self_play=True)\n\n # this is the actual iterator for self_play_structured_iterator\n self_play_structured_iterator = tf.data.Iterator.from_structure(\n tf.data.get_output_types(self_play_fulltext_iterator),\n tf.data.get_output_shapes(self_play_fulltext_iterator))\n iterators = [\n train_iterator, self_play_fulltext_iterator, self_play_structured_iterator\n ]\n\n # this is the list of placeholders\n placeholders = [\n data_placeholder, kb_placeholder, batch_size_placeholder,\n skip_count_placeholder\n ]\n return iterators, placeholders", "def trainer(model,\n optimizer,\n dataset,\n count_of_epoch=5,\n batch_size=64,\n callback=None,\n progress=None):\n iterations = range(count_of_epoch)\n\n if progress is not None:\n iterations = progress(iterations)\n\n for it in iterations:\n\n batch_generator = DataLoader(\n dataset=dataset,\n batch_size=batch_size,\n shuffle=True,\n pin_memory=True)\n\n train_epoch(\n \tmodel=model,\n train_generator=batch_generator,\n optimizer=optimizer,\n callback=callback)\n\n return", "def prepare_epoch(dataset):\n print(\"[-] Epoch Start\")\n\n i = 0\n for sample in range(len(dataset)):\n if sample <= i + BATCH_SIZE-1:\n continue\n\n batch = []\n for i in range(i, i+BATCH_SIZE):\n batch.append(get_image(dataset[i], OUT_SIZE, CHANNELS))\n\n i += BATCH_SIZE + 1\n\n batch_images = np.array(batch).astype(np.float32)\n yield (batch_images, batch_images)\n print(\"i: {}, s: 
{}\".format(i, sample))\n\n print(\"[+] Epoch complete\")", "def get_batch(self, iter_name, lang1, lang2=None):\n assert lang2 == 'img'\n iterator = self.iterators.get((iter_name, lang1, lang2), None)\n if iterator is None:\n iterator = self.get_iterator(iter_name, lang1, lang2)\n self.iterators[(iter_name, lang1, lang2)] = iterator\n try:\n x = next(iterator)\n except StopIteration:\n if self.params.is_pretrain:\n self.iterators = {}\n iterator = self.get_iterator(iter_name, lang1, lang2)\n self.iterators[(iter_name, lang1, lang2)] = iterator\n x = next(iterator)\n return x", "def data_iterator(self, ithFileReader):\n print('data_iterator', ithFileReader, threading.current_thread())\n while True:\n sampX, sampY = self.sampleTrain(ithFileReader) if self.config.is_train else self.sampleValid(ithFileReader)\n yield sampX, sampY", "def multi_input_generator(df, batch_size, source_dir,shuffle=True):\n\n idx = 0\n\n while True:\n if shuffle:\n batch = df.sample(n=batch_size, replace=False)\n else:\n batch = df.loc[idx:(idx*batch_size), :] #attention:works only with batch_size=1\n\n batch_input1 = []\n batch_input2 = []\n batch_output = []\n\n # Read in each input, perform preprocessing and get labels\n for i in batch.index:\n\n full_path = source_dir + str(batch.loc[i].dx) + \"/\" + str(batch.loc[i].aug_id)\n input1 = get_input(full_path)\n input2 = [batch.loc[i].age, batch.loc[i].sex]\n output = batch.loc[i].dx\n\n input_pre = preprocess_input(input1)\n batch_input1 += [ input_pre ]\n batch_input2 += [ input2 ]\n batch_output += [ output ]\n\n # flatten the image list so that it looks like the tensorflow iterator\n batch_input1 = [val for sublist in batch_input1 for val in sublist]\n\n # Return a tuple of ([input,input],output) to feed the network\n batch_x1 = np.array(batch_input1)\n batch_x2 = np.array(batch_input2, dtype=\"float32\")\n batch_y = lb.transform(np.array(batch_output)).astype(\"float32\")\n\n yield[batch_x1, batch_x2], batch_y\n idx += 1\n\n if idx >= len(df):\n break", "def next(self):\n prev_doc_id, prev_in_doc_pos = self._state.update_state(\n self.dataset,\n self.batch_size,\n self.context_size,\n self._num_examples_in_doc)\n\n # generate the actual batch\n batch = _NCEBatch(self.context_size)\n\n while len(batch) < self.batch_size:\n if prev_doc_id == len(self.dataset):\n # last document exhausted\n batch.torch_()\n return batch\n if prev_in_doc_pos <= (len(self.dataset[prev_doc_id].text) - 1\n - self.context_size):\n # more examples in the current document\n self._add_example_to_batch(prev_doc_id, prev_in_doc_pos, batch)\n prev_in_doc_pos += 1\n else:\n # go to the next document\n prev_doc_id += 1\n prev_in_doc_pos = self.context_size\n\n batch.torch_()\n return batch", "def next(self):\n prev_doc_id, prev_in_doc_pos = self._state.update_state(\n self.dataset,\n self.batch_size,\n self.context_size,\n self._num_examples_in_doc)\n\n # generate the actual batch\n batch = _NCEBatch(self.context_size)\n\n while len(batch) < self.batch_size:\n if prev_doc_id == len(self.dataset):\n # last document exhausted\n batch.torch_()\n return batch\n if prev_in_doc_pos <= (len(self.dataset[prev_doc_id].text) - 1\n - self.context_size):\n # more examples in the current document\n self._add_example_to_batch(prev_doc_id, prev_in_doc_pos, batch)\n prev_in_doc_pos += 1\n else:\n # go to the next document\n prev_doc_id += 1\n prev_in_doc_pos = self.context_size\n\n batch.torch_()\n return batch", "def train(self, batch_training=False):\n raise NotImplementedError", "def batch_iter(data: 
Union[np.ndarray, List[Any]], labels: Union[np.ndarray, List[Any]],\n batch_size: int, num_epochs: int) -> Tuple[Iterable[Any], Iterable[Any]]:\n assert len(data) == len(labels)\n\n for _ in range(num_epochs):\n start_index = 0\n while start_index < len(data) - 1:\n end_index = min(len(data) - 1, start_index + batch_size)\n\n xdata = data[start_index: end_index]\n ydata = labels[start_index: end_index]\n\n yield xdata, ydata\n\n start_index += batch_size", "def valid_minibatches(self):\n batch_size = self.params['batch_size']\n start_index = 0\n while start_index + batch_size < 500:\n end_index = start_index + batch_size\n yield self.input[start_index:end_index], self.y[start_index:end_index]\n start_index = end_index", "def __next__(self) -> dict:\n batches = {}\n terminations = 0\n for iterator in self.iterators:\n \n try:\n data, target = next(iterator)\n batches[data.location] = (data, target)\n\n except (TypeError, AttributeError) as e:\n logging.warning(f\"Dangling pointer detected! Skipping operation... Error: {e}\")\n \n except StopIteration:\n terminations += 1\n\n # Every cached iterator has been iterated through completely\n if terminations == len(self.iterators):\n raise StopIteration\n\n return batches", "def ptb_iterator(raw_data, batch_size, num_steps, augment=False):\n raw_data = np.array(raw_data, dtype=np.int32)\n\n if augment:\n # https://github.com/cooijmanstim/recurrent-batch-normalization/blob/master/penntreebank.py#L93\n offset = np.random.randint(num_steps)\n raw_data = raw_data[offset:]\n\n data_len = len(raw_data)\n batch_len = data_len // batch_size\n data = np.zeros([batch_size, batch_len], dtype=np.int32)\n for i in range(batch_size):\n data[i] = raw_data[batch_len * i:batch_len * (i + 1)]\n\n\n epoch_size = (batch_len - 1) // num_steps\n\n if epoch_size == 0:\n raise ValueError(\"epoch_size == 0, decrease batch_size or num_steps\")\n\n for i in range(epoch_size):\n x = data[:, i*num_steps:(i+1)*num_steps]\n y = data[:, i*num_steps+1:(i+1)*num_steps+1]\n yield (x, y)", "def data_gen(voc_size, batch, nbatches, seq_len = 15):\r\n for i in range(nbatches):\r\n # (batch_size, seq_len)\r\n data = torch.from_numpy(\r\n np.random.randint(1, voc_size, size=(batch, seq_len)))\r\n data[:, 0] = 1 # add start token\r\n src = Variable(data, requires_grad=False)\r\n tgt = Variable(data, requires_grad=False)\r\n yield Batch(src, tgt, 0) # Accessed by next function one by one\r", "def next_batch(self, batch_size):\n start = self.index_in_epoch\n self.index_in_epoch += batch_size\n self.epoch += batch_size/self.num_examples\n\n # When all the training data is ran, shuffles it\n if self.index_in_epoch > self.num_examples and self.shuffle:\n self.indexer = np.random.permutation(self.num_examples)\n # Start next epoch\n start = 0\n self.index_in_epoch = batch_size\n assert batch_size <= self.num_examples\n\n if self.iterate:\n batch_df = pd.DataFrame()\n if self.epoch < 1:\n batch_df = pd.read_csv(self.path, nrows=batch_size, skiprows=start)\n else:\n for i in range(batch_size):\n item = pd.read_csv(self.path, nrows=1, skiprows=self.indexer[start+i])\n batch_df = pd.concat(item)\n else:\n batch_df = self.df[start: self.index_in_epoch]\n\n examples = np.multiply(batch_df.iloc[:, 1:].values.astype(np.float), 1.0 / 255.0)\n labels = self.dense_to_one_hot(batch_df.iloc[:, 0].values.ravel(), 10)\n\n batch = {'features': examples, 'labels': labels}\n return batch", "def next_batch(self):\n for nb in xrange(self.num_batches):\n if self.batch_end < self.full_len:\n batch_X_raw = 
self.full_X[self.batch_start:self.batch_end]\n batch_y_raw = self.full_y[self.batch_start:self.batch_end]\n else:\n batch_X_raw = self.full_X[self.batch_start:]\n batch_y_raw = self.full_y[self.batch_start:]\n batch_X, batch_y = pad_sort_data(batch_X_raw, batch_y_raw)\n self.batch_start = self.batch_end\n self.batch_end += self.batch_size\n yield batch_X, batch_y", "def _get_iterator(self, dataset_type, eval_mode, **kwargs):", "def _train_loop(self, X, update_counter, context_mask):\n\n epoch = 0\n prev_activation = np.zeros((self.map_dim, self.data_dim))\n influences, learning_rates = self._param_update(0, len(update_counter))\n\n for idx, x in enumerate(progressbar(X)):\n\n prev_activation = self._example(x, influences, prev_activation=prev_activation)\n\n if idx in update_counter:\n\n epoch += 1\n influences, learning_rate = self._param_update(epoch, len(update_counter))\n\n if idx in context_mask:\n\n prev_activation = np.zeros((self.map_dim, self.data_dim))", "def data(self, train=True, batch_size=2):\n if train:\n elements = self.prepare_batch(self.training_albums)\n else:\n elements = self.prepare_batch(self.validation_albums)\n\n while len(elements) > 0:\n # Collect the batch\n batch = []\n for _ in range(min(batch_size, len(elements))):\n batch.append(elements.pop())\n\n # Get same sequence size for all elements of the batch\n albums, labels = self.batchify(batch)\n yield albums, labels", "def train(model, iterator, optimizer, criterion, binary=True):\n epoch_loss = 0\n epoch_acc = 0\n\n model.train()\n\n for batch in iterator:\n optimizer.zero_grad()\n\n if binary:\n predictions = model(batch.text).squeeze(1)\n else:\n predictions = model(batch.text)\n\n loss = criterion(predictions, batch.label)\n\n if binary:\n acc = binary_accuracy(predictions, batch.label)\n else:\n acc = categorical_accuracy(predictions, batch.label)\n\n loss.backward()\n\n optimizer.step()\n\n epoch_loss += loss.item()\n epoch_acc += acc.item()\n\n return epoch_loss / len(iterator), epoch_acc / len(iterator)", "def train(self):\n for doc, label in zip(self.train_docs(), self.train_labels()):\n yield doc, label", "def train(self, data_iterator):\n \n if self.config['sequence_input']:\n if self.config['net_input_add_onehot']:\n input_data_ph = tf.placeholder(tf.uint8, shape=(self.config['batch_size'], self.config['timesteps']))\n else:\n input_data_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['timesteps'], self.config['num_input']))\n else:\n input_data_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['num_input']))\n \n if self.config['sequence_output']:\n if self.config['net_target_add_onehot']:\n target_ph = tf.placeholder(tf.uint8, shape=(self.config['batch_size'], self.config['timesteps']))\n else:\n target_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['timesteps'], self.config['num_output']))\n else:\n target_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['num_output']))\n \n training, loss_avg_t = self.setup_train(input_data_ph, target_ph)\n \n session = tf.Session()\n session.run(tf.global_variables_initializer())\n \n self.analyze_config()\n \n for epoch in range(self.config['epochs']):\n starttime = time.time()\n for step in range(self.config['epoch_steps']):\n input_data, target = next(data_iterator)\n tmp, loss_avg_value = session.run([training, loss_avg_t], {input_data_ph:input_data, target_ph:target})\n print(\"Epoch: {} Loss: {} Elapsed:{}s\".format(epoch, loss_avg_value, 
(time.time() - starttime)))", "def build_validation_iterator(dataset_name, batch_size, prepro_fn):\n dataset, dataset_info = tfds.load(\n dataset_name,\n split=tfds.Split.VALIDATION,\n as_supervised=True,\n with_info=True\n )\n n_samples = dataset_info.splits['validation'].num_examples\n steps_per_epoch = int(math.ceil(n_samples / batch_size))\n if prepro_fn is not None:\n dataset = dataset.map(prepro_fn, num_parallel_calls=AUTOTUNE)\n\n # Batch\n batched_dataset = dataset.padded_batch(\n batch_size,\n get_output_shapes(dataset),\n padding_values=get_padding_values(get_output_types(dataset)),\n drop_remainder=False\n )\n return batched_dataset, steps_per_epoch", "def train_with_iter(epoch, interval, batch_iter):\n alpha = 1\n model.train()\n # set the random seed for torch.generator() to shuffle the dataset\n trainSampler.set_epoch(epoch)\n if hvd.rank() == 0:\n print(\"=\"*50)\n\n if args.use_ldamloss:\n loss_weights = drw_weights(epoch, data_samples)\n ldamloss = LDAMLoss(data_samples, max_m=0.5, s=30, weight=loss_weights)\n\n epoch_start = time.time()\n for batch_idx, (data, target) in enumerate(train_loader):\n batch_start = time.time()\n\n if args.finetune:\n adjust_learning_rate_for_finetune(epoch, batch_idx)\n elif args.cosine_lr:\n adjust_learning_rate_for_cosine_decay(epoch, batch_idx)\n else:\n adjust_learning_rate(epoch, batch_idx)\n\n if args.cuda:\n if not args.fp16:\n data, target = data.cuda(), target.cuda()\n else:\n data, target = data.half().cuda(), target.cuda()\n\n # cutmix\n if args.cutmix:\n data, target_a, target_b, lam = cutmix_data(data, target, args.beta)\n output = model(data)\n if args.labelSmooth:\n loss = labelsmooth_loss(output, target_a) * lam + labelsmooth_loss(output, target_b) * (1. - lam)\n elif args.use_focalloss:\n loss = focal_loss(output, target_a) * lam + focal_loss(output, target_b) * (1. - lam)\n elif args.use_cbfocalloss:\n loss = CB_loss(output, target_a, data_samples, args.num_classes, \"focal\") * lam + CB_loss(output, target_a, data_samples, args.num_classes, \"focal\") * (1. - lam)\n elif args.use_ldamloss:\n loss = ldamloss(output, target_a) * lam + ldamloss(output, target_b) * (1. - lam) \n else:\n loss = F.cross_entropy(output, target_a) * lam + F.cross_entropy(output, target_b) * (1. 
- lam)\n\n else:\n output = model(data)\n if args.labelSmooth:\n loss = labelsmooth_loss(output, target)\n elif args.use_focalloss:\n loss = focal_loss(output, target)\n elif args.use_cbfocalloss:\n loss = CB_loss(output, target, data_samples, args.num_classes, \"focal\")\n elif args.use_ldamloss:\n loss = ldamloss(output, target)\n else:\n loss = F.cross_entropy(output, target)\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # training batch acc\n train_acc = accuracy(output, target)\n\n batch_iter += 1\n if hvd.rank() == 0:\n for param_group in optimizer.param_groups:\n learning_rate = param_group[\"lr\"]\n\n waste_time = time.time() - batch_start\n print(\"Training Epoch: [{}/{}] batch: [{}/{}] batchiter: [{}/{}] Loss: {:.4f} Accuracy: {:.4f} Learning_rate: {:.6f} Time: {:.2f} date: {}\".format(\n epoch, args.epochs, batch_idx+1, total_train_sampler, batch_iter, total_train_sampler *\n args.epochs, loss.item(), train_acc.item(\n ), learning_rate, waste_time, str(datetime.datetime.now())\n ))\n\n # train log writer\n if log_writer:\n # train batch\n log_writer.add_scalars(\n 'train/lv1', {\n 'loss': loss.item(),\n 'acc': train_acc.item()\n }, batch_iter\n )\n\n log_writer.add_scalar(\n 'train/batch_time', waste_time, batch_iter\n )\n log_writer.add_scalar(\n 'learning_rate', learning_rate, batch_iter)\n\n # validaiton with each epoch\n if args.val_dir is not None and args.val_dir != \"\":\n validation_rank, val_acc = validatin_acc()\n if hvd.rank() == 0:\n print(\"Validation Epoch: [{}/{}] batchiter: [{}/{}] Loss: {:.4f} RankLoss: {:.4f} Accuracy: {:.4f} Time: {:.2f}\".format(\n epoch, args.epochs, batch_iter, total_train_sampler *\n args.epochs, validation_rank[\"loss\"], validation_rank[\"rank_loss\"], val_acc[\"val_acc\"], time.time(\n ) - batch_start\n ))\n\n # validation_log\n if log_writer:\n log_writer.add_scalars(\n 'Val/batch', {\n 'rank_loss': validation_rank[\"rank_loss\"],\n 'loss': validation_rank[\"loss\"],\n },\n batch_iter\n )\n log_writer.add_scalars(\n 'Val/batch_acc', {\n 'accuracy': val_acc['val_acc']\n },\n batch_iter\n )\n\n log_writer.add_scalars(\n 'Val/epoch_acc', {\n 'accuracy': val_acc['val_acc']\n },\n epoch + 1\n )\n\n # save checkpoint with the epoch\n save_checkpoint(epoch, \"epoch\")\n\n if hvd.rank() == 0:\n print(\"Epoch [{}/{}] waste time is {}\".format(epoch,\n args.epochs, time.time() - epoch_start))\n\n return batch_iter", "def train(self, epoch=50):\n # self.history = self.model.fit(self.train_images,\n # self.train_labels,\n # epochs=epoch,\n # validation_data=(self.test_images, self.test_labels))\n datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1,\n horizontal_flip=True)\n # prepare iterator\n it_train = datagen.flow(self.train_images, self.train_labels, batch_size=64)\n # fit model\n steps = int(self.train_images.shape[0] / 64)\n self.history = self.model.fit_generator(it_train, steps_per_epoch=steps,\n epochs=epoch,\n validation_data=(self.test_images,\n self.test_labels),\n verbose=1)\n # evaluate model\n _, acc = self.model.evaluate(self.test_images, self.test_labels, verbose=0)\n LOGGER.info('> %.3f' % (acc * 100.0))\n self.summarize_diagnostics()", "def next(self):\n # Keeps under lock only the mechanism which advances\n # the indexing of each batch.\n with self.lock:\n index_array = next(self.index_generator)\n # The transformation of images is not under thread lock\n # so it can be done in parallel\n return 
self._get_batches_of_transformed_samples(index_array)" ]
[ "0.7882153", "0.7777626", "0.7653529", "0.74618405", "0.7379885", "0.73270446", "0.72640824", "0.7249159", "0.7226248", "0.7225146", "0.72101283", "0.71956915", "0.71737856", "0.7097069", "0.70954365", "0.7068481", "0.7059343", "0.7055088", "0.70229006", "0.7015991", "0.7006285", "0.6957259", "0.69504", "0.6938331", "0.6918102", "0.6902777", "0.688734", "0.6882702", "0.68621874", "0.6858129", "0.6852952", "0.6849129", "0.68479866", "0.68432426", "0.6837037", "0.68175125", "0.68153834", "0.6813249", "0.6801218", "0.68009156", "0.6799771", "0.6765396", "0.675093", "0.67135686", "0.67088836", "0.6708482", "0.67045516", "0.6700376", "0.6696439", "0.6680373", "0.66772264", "0.667583", "0.66755384", "0.66755384", "0.6672666", "0.6665318", "0.664598", "0.6635581", "0.6622174", "0.6619602", "0.6612525", "0.66106427", "0.660919", "0.6599907", "0.659829", "0.6598239", "0.65980595", "0.65901536", "0.6587927", "0.6583352", "0.65808403", "0.65595806", "0.6559546", "0.65499896", "0.654658", "0.65389067", "0.65385014", "0.65362364", "0.65177137", "0.6517134", "0.65131205", "0.65131205", "0.6511918", "0.65096533", "0.6509325", "0.6507244", "0.65045726", "0.65042627", "0.6501935", "0.64943856", "0.6492458", "0.64838386", "0.6481598", "0.6475093", "0.646952", "0.6463674", "0.64603025", "0.6460107", "0.64598083", "0.6454674" ]
0.7671514
2
The evaluation flow using test_dataset without grad.
def eval(self):
    # parameters initialize
    torch = import_optional_dependency("torch")
    eval_total = 0
    eval_correct = 0
    eval_loss = 0
    self._set_eval()
    # display the information
    if self.info:
        print(f"\rEvaluating...", end="")
    # start eval part
    for i, (source, target) in enumerate(self.eval_dataset):
        # send data to device
        source = source.to(self.device)
        target = target.to(self.device)
        result = self.model(source)
        eval_loss += self.criterion(result, target).item()
        _, predicted = torch.max(result.data, 1)
        eval_total += target.size(0)
        eval_correct += (predicted == target).sum().item()
    accuracy = eval_correct / eval_total
    eval_loss = eval_loss / eval_total
    if self.info:
        print(f"\rEvaluation loss: { eval_loss } | Accuracy: { accuracy }")
    return eval_loss, accuracy
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate(self):\n self.training = False", "def test_step(self, x_test, y_test):\n\n print(\"Evaluation:\")\n\n input_x_op = self.session.graph.get_operation_by_name(\"input_x\").outputs[0]\n input_y_op = self.session.graph.get_operation_by_name(\"input_y\").outputs[0]\n global_step_op = self.session.graph.get_operation_by_name(\"global_step\").outputs[0]\n\n loss_op = self.session.graph.get_operation_by_name(\"loss/loss\").outputs[0]\n\n predictions_op = self.session.graph.get_operation_by_name(\"output/predictions\").outputs[0] \n\n accuracy_op = self.session.graph.get_operation_by_name(\"accuracy/accuracy\").outputs[0]\n confusion_update_op = self.session.graph.get_operation_by_name(\"accuracy/confusion_update\").outputs[0]\n\n d_ = {\n input_x_op: x_test,\n input_y_op: y_test\n }\n\n self.init_dataset(d_)\n\n valid_batches_per_epoch = (len(x_test) - 1) // self.FLAGS.batch_size + 1\n\n sum_accuracy = 0\n \n confusion_variable = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"accuracy/confusion\")[0]\n self.session.run([confusion_variable.initializer])\n\n for current_step in range(valid_batches_per_epoch):\n\n if self.FLAGS.summary:\n step, summaries, loss, accuracy, cnf_matrix, predictions = self.session.run(\n [global_step_op, self.dev_summary_op, loss_op, accuracy_op, confusion_update_op, predictions_op])\n\n self.writer.add_summary(summaries, step)\n else:\n step, loss, accuracy, cnf_matrix, predictions = self.session.run(\n [global_step_op, loss_op, accuracy_op, confusion_update_op, predictions_op]) \n\n sum_accuracy += accuracy\n\n try:\n all_predictions = np.concatenate((all_predictions, predictions), axis=0)\n except NameError:\n all_predictions = predictions\n\n\n valid_accuracy = sum_accuracy / valid_batches_per_epoch\n\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: valid_accuracy {:g}\".format(time_str, valid_accuracy))\n print(\"Confusion matrix:\")\n print(cnf_matrix)\n\n return valid_accuracy, all_predictions", "def evaluate(self, dataset):\n\t\tpass", "def eval(self):\n self.train(mode=False)", "def _evaluate_during_fit(self, test_loader, epoch):", "def evaluate(self, test_data, test_labels):\n raise NotImplementedError", "def evaluate(self, test_data):\n result = self.model.run(test_data)\n self._save_result(result)", "def __call__(self, test_data, verbose=True):\n\n self.model.eval()\n with torch.no_grad():\n loss, rho, nmse = self._iteration(test_data)\n if verbose:\n print(f'\\n=> Test result: \\nloss: {loss:.3e}'\n f' rho: {rho:.3e} NMSE: {nmse:.3e}\\n')\n return loss, rho, nmse", "def test_evaluate(self):\n # Check build does not raise errors\n dataset = KDDCupDataset()\n dataset.create_fixed_samples(\n *self.data, samples_num=1, partition_sizes=self.partition_sizes)\n dataset.set_current_sample(0)\n model = self.MODEL(dataset, **self.model_arguments)\n model.fit(training_epochs=50)\n metric = model.evaluate('test')\n self.assertLessEqual(0, metric)\n self.assertGreaterEqual(1, metric)", "def evaluate(eval_ds, model, task):\n\n print('==========EVAL==========')\n # Testing contrastive accuracy\n if task['name'] == 'contrastive_accuracy':\n ds = eval_ds.map(data_utils.pretrain_preprocess)\n ds = ds.batch(128)\n test_contrast_acc = tf.keras.metrics.Accuracy(name='test_constrastive_accuracy')\n for x in ds:\n image = x['image']\n image = tf.transpose(image, [1, 0, 2, 3, 4])\n image = tf.reshape(\n image, \n (image.shape[0]*image.shape[1], image.shape[2], image.shape[3], image.shape[4])\n )\n out = model(image, mode='unsupervised', 
training=False)\n metrics.update_contrastive_accuracy2(test_contrast_acc, out, TEMP)\n print('test contrastive accuracy')\n print(test_contrast_acc.result())\n return \n\n # Testing classification accuracy \n ds = eval_ds.filter(lambda x: x['label'] != task['excluded_label'])\n ds = ds.map(data_utils.eval_preprocess)\n ds = ds.batch(FLAGS.eval_bs)\n test_class_acc = tf.keras.metrics.Accuracy(name='test_class_accuracy')\n for x in ds:\n image = x['image']\n labels = x[task['name']]\n if task['name'] == 'extr':\n out = model(image, mode='eval', sup_layers=2, training=False)\n else:\n out = model(image, mode='eval', sup_layers=1, training=False)\n metrics.update_supervised_accuracy(test_class_acc, labels, out)\n \n if FLAGS.debug:\n print(tf.math.argmax(out, axis=-1))\n print('test classification accuracy')\n print(test_class_acc.result())", "def test(self):\n self.training = False", "def test(self, test_loader):\n\n self.model.eval()\n with torch.no_grad():\n return self.tester(test_loader, verbose=False)", "def evaluate(self,\n test_loader: Type[torch.utils.data.DataLoader],\n **kwargs: float) -> float:\n # initialize loss accumulator\n test_loss = 0.\n # compute the loss over the entire test set\n with torch.no_grad():\n for data in test_loader:\n if len(data) == 1: # VAE mode\n x = data[0]\n loss = self.svi.step(x.to(self.device), **kwargs)\n else: # VED or cVAE mode\n x, y = data\n loss = self.svi.step(\n x.to(self.device), y.to(self.device), **kwargs)\n test_loss += loss\n\n return test_loss / len(test_loader.dataset)", "def eval_on_dataset(sess, G, iterator, dataset_name=\"validation\") :\n print(\">>> Evaluating model on %s\" % (dataset_name))\n step = 0\n current_epoch = iterator.epoch\n \n # Evaluate against validation before training to get baseline performance! 
\n step = 0\n cumulative_loss = 0.0\n all_probs = np.array([], dtype=np.float32)\n all_targets = np.array([], dtype=np.float32)\n while current_epoch == iterator.epoch : \n step += 1\n this_x, this_y, this_seqlen, this_mask = iterator.next()\n feed_dict = {G['input_placeholder']: this_x, \n G['target_placeholder']: this_y, \n G['seqlen_placeholder']: this_seqlen, \n G['loss_mask_placeholder']: this_mask}\n loss_value, probs = sess.run([G['loss'], G['output_probs']], feed_dict=feed_dict)\n cumulative_loss += loss_value\n all_probs = np.append(all_probs, probs)\n all_targets = np.append(all_targets, this_y)\n val_loss = cumulative_loss / float(step)\n auroc = roc_auc_score(all_targets, all_probs)\n auprc = average_precision_score(all_targets, all_probs)\n print(\">>> (%s) After epoch %d, loss = %.4f, auroc = %.4f, auprc = %.4f \" % (dataset_name, current_epoch, val_loss, auroc, auprc))\n iterator.epoch = current_epoch", "def train_and_eval():\n # train_file_name = 'adult.data'\n # test_file_name = 'adult.test'\n train_file_name = 'poker-hand-testing.data'\n test_file_name = 'poker-hand-training-true.data'\n #test_file_name = maybe_download()\n df_train = pd.read_csv(\n tf.gfile.Open(\"/opt/tensor/race_result_clean.csv\"),\n names=COLUMNS,\n skipinitialspace=True,\n skiprows=1)\n df_test = pd.read_csv(\n tf.gfile.Open(\"/opt/tensor/race_result_clean.csv\"),\n names=COLUMNS,\n skipinitialspace=True,\n skiprows=1)\n\n #df_train[LABEL_COLUMN] = (df_train[\"CLASS_Poker_Hand\"].apply(lambda x: x>5)).astype(int)\n #df_test[LABEL_COLUMN] = (df_test[\"CLASS_Poker_Hand\"].apply(lambda x: x>5)).astype(int)\n\n model_dir = tempfile.mkdtemp() if not FLAGS.model_dir else FLAGS.model_dir\n print(\"model directory = %s\" % model_dir)\n m = build_estimator(model_dir)\n print(m)\n m.fit(input_fn=lambda: input_fn(df_train), steps=FLAGS.train_steps)\n results = m.evaluate(input_fn=lambda: input_fn(df_test), steps=1)\n for key in sorted(results):\n print(\"%s: %s\" % (key, results[key]))", "def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)", "def evaluate(self, train_set, test_set, shuffle_batch=True,\n epochs=25, lr_decay=0.95, sqr_norm_lim=9,labels=None,model=None): \n cost = self.negative_log_likelihood(self.y) \n dropout_cost = self.dropout_negative_log_likelihood(self.y)\n # adadelta upgrades: dict of variable:delta\n grad_updates = self.sgd_updates_adadelta(dropout_cost, lr_decay, 1e-6, sqr_norm_lim)\n # shuffle dataset and assign to mini batches.\n # if dataset size is not a multiple of batch size, replicate \n # extra data (at random)\n np.random.seed(3435)\n batch_size = self.batch_size\n if train_set.shape[0] % batch_size > 0:\n extra_data_num = batch_size - train_set.shape[0] % batch_size\n #extra_data = train_set[np.random.choice(train_set.shape[0], extra_data_num)]\n perm_set = np.random.permutation(train_set) \n extra_data = perm_set[:extra_data_num]\n new_data = np.append(train_set, extra_data, axis=0)\n else:\n new_data = train_set\n \n shuffled_data = np.random.permutation(new_data) # Attardi\n n_batches = shuffled_data.shape[0]/batch_size\n # divide train set into 90% train, 10% validation sets\n n_train_batches = int(np.round(n_batches*0.8))\n n_val_batches = n_batches - n_train_batches\n train_set = shuffled_data[:n_train_batches*batch_size,:]\n val_set = shuffled_data[n_train_batches*batch_size:,:] \n # push data to gpu \n # the dataset has the 
format [word_indices,padding,user,label]\n train_set_x, train_set_y = shared_dataset(train_set[:,:-2], train_set[:,-1]) \n train_set_u = theano.shared(np.asarray(train_set[:,-2],dtype='int32')) \n # val_set_x = val_set[:,:-2]\n # val_set_u = val_set[:,-2]\n # val_set_y = val_set[:,-1]\n val_set_x, val_set_y = shared_dataset(val_set[:,:-2], val_set[:,-1])\n val_set_u = theano.shared(np.asarray(val_set[:,-2],dtype='int32')) \n test_set_x = test_set[:,:-2]\n test_set_u = test_set[:,-2]\n test_set_y = test_set[:,-1] \n batch_start = self.index * batch_size\n batch_end = batch_start + batch_size\n\n # compile Theano functions to get train/val/test errors\n \n \n test_y_pred = self.predict(test_set_x,test_set_u)\n test_error = T.mean(T.neq(test_y_pred, self.y))\n # errors on train set\n if self.Users is not None:\n train_model = theano.function([self.index], cost, updates=grad_updates,\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end],\n self.u: train_set_u[batch_start:batch_end]\n },\n allow_input_downcast = True)\n\n train_error = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end],\n self.u: train_set_u[batch_start:batch_end]},\n allow_input_downcast=True)\n val_model = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: val_set_x[batch_start:batch_end],\n self.y: val_set_y[batch_start:batch_end], \n self.u: val_set_u[batch_start:batch_end]},\n allow_input_downcast=True)\n test_model = theano.function([self.x, self.u, self.y], test_error, allow_input_downcast=True)\n else:\n train_model = theano.function([self.index], cost, updates=grad_updates,\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end]},\n allow_input_downcast = True)\n\n train_error = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end]},\n allow_input_downcast=True)\n\n val_model = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: val_set_x[batch_start:batch_end],\n self.y: val_set_y[batch_start:batch_end]},\n allow_input_downcast=True)\n test_model = theano.function([self.x, self.y], test_error, allow_input_downcast=True)\n\n # start training over mini-batches\n print 'training...' \n best_val_perf = 0\n test_perf = 0 \n patience = 5\n drops = 0\n prev_val_perf = 0 \n for epoch in xrange(epochs):\n start_time = time.time()\n # FIXME: should permute whole set rather than minibatch indexes\n if shuffle_batch:\n for minibatch_index in np.random.permutation(range(n_train_batches)):\n cost_epoch = train_model(minibatch_index)\n self.set_zero(self.zero_vec) # CHECKME: Why?\n else:\n for minibatch_index in xrange(n_train_batches):\n cost_epoch = train_model(minibatch_index) \n self.set_zero(self.zero_vec)\n train_losses = [train_error(i) for i in xrange(n_train_batches)]\n train_perf = 1 - np.mean(train_losses)\n val_losses = [val_model(i) for i in xrange(n_val_batches)]\n val_perf = 1 - np.mean(val_losses) \n info = 'epoch: %i\\%i (%.2f secs) train acc: %.2f %% | val acc: %.2f %%' % (\n epoch,epochs, time.time()-start_time, train_perf * 100., val_perf*100.) 
\n # from ipdb import set_trace; set_trace()\n if val_perf > prev_val_perf: \n drops=0\n if val_perf >= best_val_perf:\n best_val_perf = val_perf\n info+= \" **\"\n if model:\n # print \"save model\"\n self.save(model)\n if self.Users is not None:\n test_loss = test_model(test_set_x, test_set_u, test_set_y)\n else:\n test_loss = test_model(test_set_x, test_set_y)\n test_perf = 1 - test_loss \n else: \n drops+=1\n if drops >= patience:\n print \"Ran out of patience...\"\n break\n prev_val_perf = val_perf\n print info\n # set_trace() \n return test_perf", "def evaluate(args, dev_dataset, model):\n\n if args.dynamic_batching:\n dev_sampler = CustomBatchSampler(dev_dataset, args.dev_batch_size)\n dev_dataloader = DataLoader(\n dev_dataset,\n batch_sampler=dev_sampler,\n num_workers=0,\n collate_fn=dynamic_padding_collate_fn\n )\n else:\n dev_sampler = SequentialSampler(dev_dataset)\n dev_dataloader = DataLoader(dev_dataset, sampler=dev_sampler,\n batch_size=args.dev_batch_size, num_workers=0)\n\n model.eval()\n loss_fn = nn.CrossEntropyLoss(ignore_index=0)\n iterator = tqdm(dev_dataloader, desc=\"Evaluation\", smoothing=0.05)\n loss_cum = None\n num_batch = 0\n for step, batch_cpu in enumerate(iterator):\n num_batch += 1\n\n batch = tuple(t.to(args.device) for t in batch_cpu)\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"token_type_ids\": batch[2],\n }\n\n with torch.no_grad():\n outputs = model(**inputs)\n\n # Calculate loss of just the question part\n q_mask = (inputs['token_type_ids'] == 2)\n masked_labels = inputs['input_ids'].masked_fill(~q_mask, 0)\n shift_labels = masked_labels[..., 1:].contiguous()\n\n lm_logits = outputs[0]\n shift_logits = lm_logits[..., : -1, :].contiguous()\n loss = loss_fn(shift_logits.view(-1, shift_logits.size(-1)),\n shift_labels.view(-1))\n\n if loss_cum is None:\n loss_cum = loss\n else:\n loss_cum += loss\n\n model.train()\n\n return loss_cum.item() / num_batch", "def test(self):\n self.model.eval()\n test_loss, test_correct_preds = 0, defaultdict(int)\n if self.test_loader is None: # running G2E\n self.test_loader, self.test_size, _ = self._get_smi_dl(phase=\"test\", shuffle=False)\n test_loader = tqdm(self.test_loader, desc='testing...')\n\n running_topk_accs = defaultdict(lambda: np.nan)\n with torch.no_grad():\n epoch_test_size = 0\n for i, batch in enumerate(test_loader):\n batch_data = batch[0]\n if not isinstance(batch_data, tuple):\n batch_data = batch_data.to(self.device)\n if self.model_name == 'TransformerEBM':\n batch_data = (batch_data, 'test')\n batch_mask = batch[1].to(self.device)\n batch_energies = self._one_batch(\n batch_data, batch_mask, backprop=False,\n )\n test_batch_size = batch_energies.shape[0]\n epoch_test_size += test_batch_size\n\n # for validation/test data, true rxn may not be present!\n batch_idx = batch[2]\n batch_true_ranks_array = self.proposals_data['test'][batch_idx, 2].astype('int')\n batch_true_ranks_valid = batch_true_ranks_array[batch_true_ranks_array < self.args.minibatch_eval]\n batch_true_ranks = torch.as_tensor(batch_true_ranks_array).unsqueeze(dim=-1)\n # slightly tricky as we have to ignore rxns with no 'positive' rxn for loss calculation\n # (bcos nothing in the numerator, loss is undefined)\n loss_numerator = batch_energies[\n np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n batch_true_ranks_valid\n ]\n loss_denominator = batch_energies[\n np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n :\n ]\n 
batch_loss = (loss_numerator + torch.logsumexp(-loss_denominator, dim=1)).sum().item()\n for k in self.k_to_test:\n # index with lowest energy is what the model deems to be the most feasible rxn\n batch_preds = torch.topk(batch_energies, k=k, dim=1, largest=False)[1] \n batch_correct_preds = torch.where(batch_preds == batch_true_ranks)[0].shape[0]\n test_correct_preds[k] += batch_correct_preds\n running_topk_accs[k] = test_correct_preds[k] / epoch_test_size\n\n if k == 1:\n # overhead is only 5 ms, will check ~5 times each epoch (regardless of batch_size)\n try:\n for j in range(i * self.args.batch_size_eval, (i+1) * self.args.batch_size_eval):\n if j % (self.test_size // 5) == random.randint(0, 3) or j % (self.test_size // 8) == random.randint(0, 5): # peek at a random sample of current batch to monitor training progress\n rxn_idx = random.sample(list(range(self.args.batch_size_eval)), k=1)[0]\n rxn_true_rank = batch_true_ranks_array[rxn_idx]\n rxn_pred_rank = batch_preds[rxn_idx, 0].item()\n rxn_pred_energy = batch_energies[rxn_idx, rxn_pred_rank].item()\n rxn_true_energy = batch_energies[rxn_idx, rxn_true_rank].item() if rxn_true_rank != 9999 else 'NaN'\n rxn_orig_energy = batch_energies[rxn_idx, 0].item()\n rxn_orig_energy2 = batch_energies[rxn_idx, 1].item()\n rxn_orig_energy3 = batch_energies[rxn_idx, 2].item()\n\n rxn_true_prod = self.proposals_data['test'][batch_idx[rxn_idx], 0]\n rxn_true_prec = self.proposals_data['test'][batch_idx[rxn_idx], 1]\n rxn_cand_precs = self.proposals_data['test'][batch_idx[rxn_idx], 3:]\n rxn_pred_prec = rxn_cand_precs[batch_preds[rxn_idx]]\n rxn_orig_prec = rxn_cand_precs[0]\n rxn_orig_prec2 = rxn_cand_precs[1]\n rxn_orig_prec3 = rxn_cand_precs[2]\n logging.info(f'\\ntrue product: \\t\\t\\t\\t{rxn_true_prod}')\n logging.info(f'pred precursor (rank {rxn_pred_rank}, energy = {rxn_pred_energy:+.4f}):\\t\\t\\t{rxn_pred_prec}')\n if rxn_true_energy == 'NaN':\n logging.info(f'true precursor (rank {rxn_true_rank}, energy = {rxn_true_energy}):\\t\\t\\t\\t{rxn_true_prec}')\n else:\n logging.info(f'true precursor (rank {rxn_true_rank}, energy = {rxn_true_energy:+.4f}):\\t\\t\\t{rxn_true_prec}')\n logging.info(f'orig precursor (rank 0, energy = {rxn_orig_energy:+.4f}):\\t\\t\\t{rxn_orig_prec}')\n logging.info(f'orig precursor (rank 1, energy = {rxn_orig_energy2:+.4f}):\\t\\t\\t{rxn_orig_prec2}')\n logging.info(f'orig precursor (rank 2, energy = {rxn_orig_energy3:+.4f}):\\t\\t\\t{rxn_orig_prec3}\\n')\n break\n except Exception as e:\n tb_str = traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)\n logging.info(\"\".join(tb_str))\n logging.info('\\nIndex out of range (last minibatch)')\n\n test_loss += batch_loss\n test_loader.set_description(f\"testing...loss={test_loss / epoch_test_size:.4f}, top-1 acc={running_topk_accs[1]:.4f}, top-5 acc={running_topk_accs[5]:.4f}, top-10 acc={running_topk_accs[10]:.4f}\")\n test_loader.refresh()\n \n for k in self.k_to_test:\n self.test_topk_accs[k] = test_correct_preds[k] / epoch_test_size # self.test_size\n\n logging.info(f'\\nTest loss: {test_loss / epoch_test_size:.4f}')\n message = f\"{self.args.expt_name}\\n\"\n for k in self.k_to_test:\n this_topk_message = f'Test top-{k} accuracy: {100 * self.test_topk_accs[k]:.3f}%'\n logging.info(this_topk_message)\n message += this_topk_message + '\\n'\n try:\n send_message(message)\n except Exception as e:\n pass", "def _main(\n get_data: callable,\n EPOCHS: int = 10,\n PERIOD: int = 5,\n BATCH_SIZE: int = 256,\n LR: float = 1e-5,\n NEURONS: list = [128, 128],\n 
forecast: bool = False,\n tuning: bool = True,\n) -> None:\n @tf.function\n def train_step(x, y):\n with tf.GradientTape() as tape:\n pred = model(x)\n loss = loss_object(y, pred)\n grad = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(grad, model.trainable_variables))\n train_loss.update_state(loss)\n train_accuracy.update_state(y, pred)\n\n\n @tf.function\n def test_step(x, y):\n # Test and validation step have the same operation.\n pred = model(x)\n loss = loss_object(y, pred)\n dev_loss.update_state(loss)\n dev_accuracy.update_state(y, pred)\n\n print(\"Reading data...\")\n X_train, X_dev, y_train, y_dev, X_test = get_data()\n print(\"X_train@{}, X_dev@{}\".format(X_train.shape, X_dev.shape))\n train_ds = tf.data.Dataset.from_tensor_slices(\n (X_train, y_train)).shuffle(int(1e6)).batch(BATCH_SIZE)\n\n dev_ds = tf.data.Dataset.from_tensor_slices(\n (X_dev, y_dev)).batch(BATCH_SIZE)\n\n num_fea = X_train.shape[1]\n model = NN(num_neurons=NEURONS)\n\n loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=False)\n optimizer = tf.keras.optimizers.Adam(learning_rate=LR)\n\n train_loss = tf.keras.metrics.Mean(name=\"train_loss\")\n train_accuracy = tf.keras.metrics.BinaryAccuracy(\n name=\"train_accuracy\")\n\n dev_loss = tf.keras.metrics.Mean(name=\"dev_loss\")\n dev_accuracy = tf.keras.metrics.BinaryAccuracy(\n name=\"dev_accuracy\")\n\n trace = {\"train\": [], \"val\": []}\n for epoch in range(EPOCHS):\n train_loss.reset_states()\n train_accuracy.reset_states()\n dev_loss.reset_states()\n dev_accuracy.reset_states()\n # Loop over batches.\n for x, y in train_ds:\n # x @ (batch_size, num_features)\n # y @ (batch_size, 1) --> probit\n train_step(x, y)\n\n for t_x, t_y in dev_ds:\n test_step(t_x, t_y)\n\n if (epoch+1) % PERIOD == 0:\n report = \"Epoch {:d}, Loss: {:0.6f}, Accuracy: {:0.6f}, Validation Loss: {:0.6f}, Validation Accuracy: {:0.6f}\"\n print(report.format(\n epoch+1,\n train_loss.result(),\n train_accuracy.result()*100,\n dev_loss.result(),\n dev_accuracy.result()*100))\n\n # Record loss\n trace[\"train\"].append(train_loss.result())\n trace[\"val\"].append(dev_loss.result())\n\n # AUC\n pred_train = model(X_train).numpy()\n pred_dev = model(X_dev).numpy()\n\n auc_train = metrics.roc_auc_score(y_true=y_train, y_score=pred_train)\n auc_dev = metrics.roc_auc_score(y_true=y_dev, y_score=pred_dev)\n\n print(\"AUC on Training Set: {: 0.6f}\".format(auc_train))\n print(\"AUC on Developing Set: {: 0.6f}\".format(auc_dev))\n\n if forecast:\n pred = model(X_test)\n return pred.numpy()\n if tuning:\n return {\n \"EPOCHS\": EPOCHS,\n \"BATCH_SIZE\": BATCH_SIZE,\n \"LR\": LR,\n \"NEURONS\": NEURONS,\n \"AUC_TRAIN\": auc_train,\n \"AUC_DEV\": auc_dev,\n \"LOSS_TRAIN\": train_loss.result().numpy(),\n \"LOSS_DEV\": dev_loss.result().numpy(),\n \"ACCURACY_TRAIN\": train_accuracy.result().numpy(),\n \"ACCURACY_DEV\": dev_accuracy.result().numpy(),\n }\n\n plt.plot(np.log(trace[\"train\"]))\n plt.plot(np.log(trace[\"val\"]))\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Log Cross Entropy Loss\")\n plt.legend([\"Training\", \"Validation\"])\n plt.title(\"LR={}, AUC_train={:0.3f}, AUC_dev={:0.3f}\".format(LR, auc_train, auc_dev))\n plt.show()", "def evaluate(self, X_test, y_test):\n self.run(self)\n self.y_pred = self.pipeline.predict(X_test)\n self.rmse = compute_rmse(self.y_pred, y_test)", "def test(self):\r\n \r\n args = self.args\r\n model = self.model\r\n dataset = self.dataset\r\n \r\n dataset.set_split('test')\r\n batch_generator = 
generate_nmt_batches(dataset, \r\n batch_size=len(dataset), \r\n device=args.device)\r\n\r\n acc_sum = 0.0\r\n model.eval()\r\n \r\n for batch_index, batch_dict in enumerate(batch_generator):\r\n # step 1. compute the output\r\n if isinstance(model,NMTModelWithMLTM):\r\n y_pred = model(batch_dict['x_source'], \r\n batch_dict['x_source_mltm_vector'],\r\n batch_dict['x_source_length'], \r\n batch_dict['x_target'])\r\n else:\r\n y_pred = model(batch_dict['x_source'], \r\n batch_dict['x_source_length'], \r\n batch_dict['x_target'])\r\n\r\n acc_t = compute_accuracy(y_pred, batch_dict['y_target'], self.mask_index)\r\n acc_sum += acc_t\r\n \r\n return acc_sum / (batch_index+1)", "def evaluate():\n # 1. Build eval model.\n with tf.name_scope(\"eval\"):\n X,y = model.placeholders()\n logits = model.inference(X)\n top_k_op = tf.nn.in_top_k(logits, y, 1)\n\n # 2. filename_queue with eval data\n filename_queue = inputHandler.get_filenames_queue(\n data_dir=FLAGS.data_dir,\n is_train=False)\n # 3. Data feed ops, placed on CPU.\n with tf.device('/cpu:0'):\n image_batch_op, label_batch_op = inputHandler.get_data_batch(\n filename_queue,\n batch_size=FLAGS.batch_size,\n is_train=False)\n\n # merged = tf.summary.merge_all()\n # test_writer = tf.summary.FileWriter(FLAGS.log_dir, tf.get_default_graph())\n\n with tf.Session() as sess:\n \"\"\"\n Note: Do not init variables again; This will load new set of variables,\n as if a new training session is being started, ignoring pre-trained\n weights. Just load variables from checkpoint.\n The precision value should be same for different runs of this experiment.\n i.e re-running eval with same checkpoint, data and other values should\n give same precision value. If a different precision value is returned\n for each run, most possibly the pre-trained weights are messed up\n somewhere.\n \"\"\"\n print(sess.run(tf.report_uninitialized_variables()))\n saver = tf.train.Saver()\n ckpt = tf.train.get_checkpoint_state(FLAGS.ckpt_dir)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n saver.restore(sess, ckpt.model_checkpoint_path)\n else:\n print('No checkpoint file found')\n return\n print(sess.run(tf.report_uninitialized_variables()))\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n\n num_iters = int(math.ceil(inputHandler.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL/FLAGS.batch_size))\n i = 0\n true_count = 0\n\n try:\n while (not coord.should_stop()) and (i < num_iters):\n image_batch, label_batch = sess.run([image_batch_op, label_batch_op])\n predictions = sess.run(top_k_op,\n feed_dict={\n X: image_batch,\n y: label_batch\n }\n )\n i += 1\n # test_writer.add_summary(summary, i)\n true_count += np.sum(predictions)\n\n except tf.errors.OutOfRangeError:\n print('Epoch limit reached.')\n\n precision = true_count / inputHandler.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL\n print(\"Precision: \", precision)\n # test_writer.close()\n coord.request_stop()\n coord.join(threads)\n sess.close()", "def linear_eval(train_ds, model, task, epochs=10, eval_ds=None):\n\n print('==========LINEAR EVAL==========')\n\n # Filter out undesired examples with excluded_label\n ds = train_ds.filter(lambda x: x['label'] != task['excluded_label'])\n ds = ds.map(data_utils.finetune_preprocess)\n ds = ds.shuffle(1000)\n ds = ds.batch(FLAGS.lineareval_bs)\n\n # loss, metrics, optimizers\n train_loss= tf.keras.metrics.Mean(name='train_loss')\n train_sup_acc = tf.keras.metrics.Accuracy(name='train_supervised_accuracy')\n criterion_sup = 
tf.nn.softmax_cross_entropy_with_logits \n optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)\n\n # Base network and projection head layers are not trainable in linear_eval\n model.resnet.trainable = False\n model.ph.trainable = False \n\n for epoch in range(epochs): \n model.resnet.trainable = False\n train_loss.reset_states()\n train_sup_acc.reset_states()\n for x in ds:\n with tf.GradientTape() as tape:\n image = x['image']\n labels = x[task['name']]\n out = model(image, mode='supervised', sup_layers=1, training=True)\n metrics.update_supervised_accuracy(train_sup_acc, labels, out)\n\n loss = criterion_sup(tf.one_hot(labels, depth=task['num_classes']), out)\n loss = tf.math.reduce_mean(loss)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(\n filter(lambda gv: gv[0] is not None, zip(gradients, model.trainable_variables))\n )\n train_loss.update_state(loss)\n print('supervised (linear eval) loss')\n print(train_loss.result())\n print('supervised (linear eval) accuracy')\n print(train_sup_acc.result())\n\n # Evaluate results on eval_ds if possible\n if eval_ds is not None:\n evaluate(eval_ds, model, task)\n \n model.resnet.trainable = True\n model.ph.trainable = True", "def val_step(self, data_batch, **kwargs):\n output = self.forward_test(**data_batch, **kwargs)\n return output", "def test(self, dataset):\n test_accuracy = 0\n test_loss = 0\n num_examples_tested = 0\n # Put model into evaluation mode\n self.model.eval()\n for num, batch in enumerate(dataset.loader):\n xs, ys = batch\n batch_size = len(xs)\n num_examples_tested += batch_size\n iloss, iaccuracy = self.model(xs, ys)\n test_loss += iloss.cpu().data.numpy().item() * batch_size\n test_accuracy += iaccuracy.cpu().data.numpy().item() * batch_size\n test_accuracy = test_accuracy / num_examples_tested\n test_loss = test_loss / num_examples_tested\n # Return accuracy and loss for this model on the test set\n return test_accuracy, test_loss", "def evaluate_on_test_set(self, energy_threshold=80):\n losses = []\n accuracies = []\n codings = []\n codings_label = []\n losses_item = []\n test_loader = self.dm.get_test_set()\n N = 0\n gamma_sum = 0\n mu_sum = 0\n cov_mat_sum = 0\n\n # Change the model to evaluation mode\n self.model.eval()\n\n with torch.no_grad():\n # Create pytorch's train data_loader\n train_loader = self.dm.get_train_set()\n\n for i, data in enumerate(train_loader, 0):\n # transfer tensors to selected device\n train_inputs, _ = data[0].to(self.device), data[1].to(self.device)\n\n # forward pass\n code, x_hat, cosim, z, gamma = self.model(train_inputs)\n phi, mu, cov_mat = self.model.compute_params(z, gamma)\n\n batch_gamma_sum = gamma.sum(axis=0)\n\n gamma_sum += batch_gamma_sum\n mu_sum += mu * batch_gamma_sum.unsqueeze(-1) # keep sums of the numerator only\n cov_mat_sum += cov_mat * batch_gamma_sum.unsqueeze(-1).unsqueeze(-1) # keep sums of the numerator only\n\n N += train_inputs.shape[0]\n\n train_phi = gamma_sum / N\n train_mu = mu_sum / gamma_sum.unsqueeze(-1)\n train_cov = cov_mat_sum / gamma_sum.unsqueeze(-1).unsqueeze(-1)\n\n print(\"Train N:\", N)\n print(\"phi :\\n\", train_phi)\n print(\"mu :\\n\", train_mu)\n print(\"cov :\\n\", train_cov)\n\n # Calculate energy using estimated parameters\n\n train_energy = []\n train_labels = []\n train_z = []\n\n for i, data in enumerate(train_loader, 0):\n # transfer tensors to selected device\n train_inputs, train_inputs_labels = data[0].to(self.device), data[1]\n\n # forward pass\n code, x_hat, cosim, z, gamma = 
self.model(train_inputs)\n sample_energy, pen_cov_mat = self.model.estimate_sample_energy(z,\n train_phi,\n train_mu,\n train_cov,\n average_it=False,\n device=self.device)\n\n train_energy.append(sample_energy.cpu().numpy())\n train_z.append(z.cpu().numpy())\n train_labels.append(train_inputs_labels.numpy())\n\n train_energy = np.concatenate(train_energy, axis=0)\n train_z = np.concatenate(train_z, axis=0)\n train_labels = np.concatenate(train_labels, axis=0)\n\n test_energy = []\n test_labels = []\n test_z = []\n\n for data in test_loader:\n test_inputs, label_inputs = data[0].to(self.device), data[1]\n\n # forward pass\n code, x_hat, cosim, z, gamma = self.model(test_inputs)\n sample_energy, pen_cov_mat = self.model.estimate_sample_energy(z,\n train_phi,\n train_mu,\n train_cov,\n average_it=False,\n device=self.device)\n test_energy.append(sample_energy.cpu().numpy())\n test_z.append(z.cpu().numpy())\n test_labels.append(label_inputs.numpy())\n\n test_energy = np.concatenate(test_energy, axis=0)\n test_z = np.concatenate(test_z, axis=0)\n test_labels = np.concatenate(test_labels, axis=0)\n\n combined_energy = np.concatenate([train_energy, test_energy], axis=0)\n combined_labels = np.concatenate([train_labels, test_labels], axis=0)\n\n thresh = np.percentile(combined_energy, energy_threshold)\n print(\"Threshold :\", thresh)\n\n # Prediction using the threshold value\n pred = (test_energy > thresh).astype(int)\n groundtruth = test_labels.astype(int)\n\n accuracy = accuracy_score(groundtruth, pred)\n precision, recall, f_score, support = prf(groundtruth, pred, average='binary')\n\n print(f\"Accuracy:{accuracy}, \"\n f\"Precision:{precision}, \"\n f\"Recall:{recall}, \"\n f\"F-score:{f_score}, \"\n f\"\\nconfusion-matrix: {confusion_matrix(groundtruth, pred)}\")\n\n # switch back to train mode\n self.model.train()\n return accuracy, precision, recall, f_score, test_z, test_labels, combined_energy", "def run_eval_step(self, sess, batch):\n feed_dict = self._make_feed_dict(batch)\n to_return = {\n 'summaries': self._summaries,\n 'loss': self._loss,\n 'global_step': self.global_step,\n }\n if self._hps.coverage:\n to_return['coverage_loss'] = self._coverage_loss\n res = sess.run(to_return, feed_dict)\n return res", "def test(self):\n img_gen, self.loss_reg, self.parsav = self.net_G(self.input_P1, self.input_P2, self.input_BP1, self.input_BP2, self.input_SPL1, self.input_SPL2)\n ## test flow ##\n\n self.save_results(img_gen, data_name='vis')\n if self.opt.save_input or self.opt.phase == 'val':\n self.save_results(self.input_P1, data_name='ref')\n self.save_results(self.input_P2, data_name='gt')\n result = torch.cat([self.input_P1, img_gen, self.input_P2], 3)\n self.save_results(result, data_name='all')", "def test(self, curr_epoch):\n if not self.config.full_test_flag and (curr_epoch % self.config.test_step == 0 or\n curr_epoch == 0 or\n curr_epoch == self.config.epochs - 1):\n self.evaluator.test(curr_epoch)\n else:\n if curr_epoch == self.config.epochs - 1:\n self.evaluator.test(curr_epoch)", "def run_test(self):\n self.output_analytics = self.run_inference()\n self.output_df = pd.DataFrame(self.output_analytics)", "def _test(self):\r\n lr, hr = self.sess.run(self.test_batch)\r\n res = self.sess.run(\r\n [self.merged,\r\n self.GAN.g_loss, self.GAN.mse_loss, self.GAN.g_ad_loss,\r\n self.GAN.d_loss, self.GAN.d_loss_real, self.GAN.d_loss_fake],\r\n feed_dict={\r\n self.GAN.g_images: lr,\r\n self.GAN.d_images: hr,\r\n self.GAN.is_training: False\r\n })\r\n\r\n return res", "def test_model(net, 
data_loader):\n net.eval()\n running_loss = 0.0\n with torch.no_grad():\n for data in data_loader:\n X = data['X']\n y_d = data['y_descreen']\n outputs = net(X)\n loss = criterion(outputs, y_d)\n running_loss += loss\n return running_loss", "def test(self):\n with torch.no_grad():\n self.model.eval()\n p10_forecast, p10_forecast, p90_forecast, target = None, None, None, None\n\n t = time()\n for step, sample in enumerate(self.test_loader):\n\n # Hide future predictions from input vector, set to 0 (or 1) values where timestep > encoder_steps\n steps = self.cnf.all_params['num_encoder_steps']\n pred_len = sample['outputs'].shape[1]\n x = sample['inputs'].float().to(self.cnf.device)\n x[:, steps:, 0] = 1\n\n # Feed input to the model\n if self.cnf.all_params[\"model\"] == \"transformer\" or self.cnf.all_params[\"model\"] == \"grn_transformer\":\n\n # Auto-regressive prediction\n for i in range(pred_len):\n output = self.model.forward(x)\n x[:, steps + i, 0] = output[:, i, 1]\n output = self.model.forward(x)\n\n elif self.cnf.all_params[\"model\"] == \"tf_transformer\":\n output, _, _ = self.model.forward(x)\n else:\n raise NameError\n\n output = output.squeeze()\n y, y_pred = sample['outputs'].squeeze().float().to(self.cnf.device), output\n\n # Compute loss\n loss, _ = self.loss(y_pred, y)\n smape = symmetric_mean_absolute_percentage_error(output[:, :, 1].detach().cpu().numpy(),\n sample['outputs'][:, :, 0].detach().cpu().numpy())\n\n # De-Normalize to compute metrics\n target = unnormalize_tensor(self.data_formatter, y, sample['identifier'][0][0])\n p10_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 0], sample['identifier'][0][0])\n p50_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 1], sample['identifier'][0][0])\n p90_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 2], sample['identifier'][0][0])\n\n # Compute metrics\n self.test_losses['p10'].append(self.loss.numpy_normalised_quantile_loss(p10_forecast, target, 0.1))\n self.test_losses['p50'].append(self.loss.numpy_normalised_quantile_loss(p50_forecast, target, 0.5))\n self.test_losses['p90'].append(self.loss.numpy_normalised_quantile_loss(p90_forecast, target, 0.9))\n\n self.test_loss.append(loss.item())\n self.test_smape.append(smape)\n\n # Plot serie prediction\n p1, p2, p3, target = np.expand_dims(p10_forecast, axis=-1), np.expand_dims(p50_forecast, axis=-1), \\\n np.expand_dims(p90_forecast, axis=-1), np.expand_dims(target, axis=-1)\n p = np.concatenate((p1, p2, p3), axis=-1)\n plot_temporal_serie(p, target)\n\n # Log stuff\n for k in self.test_losses.keys():\n mean_test_loss = np.mean(self.test_losses[k])\n print(f'\\t● AVG {k} Loss on TEST-set: {mean_test_loss:.6f} │ T: {time() - t:.2f} s')\n\n # log log log\n mean_test_loss = np.mean(self.test_loss)\n mean_smape = np.mean(self.test_smape)\n print(f'\\t● AVG Loss on TEST-set: {mean_test_loss:.6f} │ T: {time() - t:.2f} s')\n print(f'\\t● AVG SMAPE on TEST-set: {mean_smape:.6f} │ T: {time() - t:.2f} s')", "def train_and_evaluate(name, model, train, test, evaluation, final_eval, output_dir):\n\n print(\"---\" * 5)\n print(\"Running pipeline for {}\".format(name))\n\n plot_dir = os.path.join(output_dir, \"plots\")\n\n pipeline = make_pipeline(model)\n\n X_train, y_train = train.drop(\n [\"PM10\"], axis=1).values, train[\"PM10\"].values\n X_test, y_test = test.drop([\"PM10\"], axis=1).values, test[\"PM10\"].values\n X_eval, y_eval = evaluation.drop(\n [\"PM10\"], axis=1).values, evaluation[\"PM10\"].values\n X_final, y_final = 
final_eval.drop(\n [\"PM10\"], axis=1), final_eval[\"PM10\"].values\n\n # first round - fit on train, predict on test\n print(\"Fitting pipeline on train data\")\n pipeline.fit(X_train, y_train)\n yhat = pipeline.predict(X_test)\n mae = mean_absolute_error(y_test, yhat)\n print(\"MAE: {}\".format(mae))\n plot_predictions(\n y_test, yhat, title=\"{} - Predicted vs. Actual on Test\".format(name), output_dir=plot_dir)\n\n # second round - fit on train + test, predict on evaluation\n X_train = np.concatenate([X_train, X_test])\n y_train = np.concatenate([y_train, y_test])\n print(\"Fitting pipeline on train + test data\")\n pipeline.fit(X_train,y_train)\n yhat = pipeline.predict(X_eval)\n mae = mean_absolute_error(y_eval,yhat)\n print(\"MAE: {}\".format(mae))\n plot_predictions(y_eval,yhat,title=\"{} - Predicted vs. Actual on Evaluation\".format(name),output_dir=plot_dir)\n\n # final round - fit on last X hours, by which the actual score will be measured\n X_train = np.concatenate([X_train, X_eval])\n y_train = np.concatenate([y_train, y_eval])\n print(\"Fitting pipeline on all \\\"all available data\\\"\")\n pipeline.fit(X_train, y_train)\n yhat = pipeline.predict(X_final)\n mae = mean_absolute_error(y_final, yhat)\n print(\"MAE: {}\".format(mae))\n plot_predictions(\n y_final, yhat, title=\"{} - Predicted vs. Actual\".format(name), output_dir=plot_dir)\n\n # save the model\n joblib.dump(model, os.path.join(\n output_dir, \"models\", \"{}.joblib\".format(name)))\n\n return yhat, mae", "def test(self):\n self.eval()\n test_mask = self.data.test_mask\n labels = self.data.y\n output = self.forward(self.data)\n # output = self.output\n loss_test = F.nll_loss(output[test_mask], labels[test_mask])\n acc_test = utils.accuracy(output[test_mask], labels[test_mask])\n print(\"Test set results:\",\n \"loss= {:.4f}\".format(loss_test.item()),\n \"accuracy= {:.4f}\".format(acc_test.item()))\n return acc_test.item()", "def evaluate():\n with tf.Graph().as_default() as g:\n \n # Get hazy and clean images for SYNTHIA.\n val = FLAGS.val\n hazy_images, clean_images_ground_truth, _ = model_spec.input(val)\n\n # Build a Graph that computes the dehazed predictions from the\n # inference model.\n clean_images_predicted = model_spec.inference(hazy_images)\n\n # Calculate loss (only the data term).\n loss = model_spec.data_loss(clean_images_predicted, clean_images_ground_truth)\n\n # Restore the moving average version of the learned variables for eval.\n variable_averages = tf.train.ExponentialMovingAverage(\n model_spec.MOVING_AVERAGE_DECAY)\n variables_to_restore = variable_averages.variables_to_restore()\n saver = tf.train.Saver(variables_to_restore)\n\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.merge_all_summaries()\n\n summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, g)\n\n while True:\n eval_once(saver, summary_writer, loss, summary_op)\n if FLAGS.run_once:\n print('Finished one-off evaluation.')\n break\n time.sleep(FLAGS.eval_interval_secs)", "def test_distributed(self):\n self.model.eval()\n test_loss, test_correct_preds = 0, defaultdict(int)\n if self.test_loader is None: # running G2E\n self.test_loader, self.test_size, self.test_sampler = self._get_smi_dl(phase=\"test\", shuffle=False)\n self.test_sampler.set_epoch(0)\n if self.rank == 0:\n test_loader = tqdm(self.test_loader, desc='testing...')\n else:\n test_loader = self.test_loader\n \n running_topk_accs = defaultdict(lambda: np.nan)\n with torch.no_grad():\n epoch_test_size = 0\n for i, batch in 
enumerate(test_loader):\n batch_data = batch[0]\n if not isinstance(batch_data, tuple):\n batch_data = batch_data.cuda(non_blocking=True)\n if self.model_name == 'TransformerEBM':\n batch_data = (batch_data, 'test')\n batch_mask = batch[1].cuda(non_blocking=True)\n batch_energies = self._one_batch(\n batch_data, batch_mask, backprop=False,\n )\n test_batch_size = batch_energies.shape[0]\n test_batch_size = torch.tensor([test_batch_size]).cuda(self.gpu, non_blocking=True)\n dist.all_reduce(test_batch_size, dist.ReduceOp.SUM)\n test_batch_size = test_batch_size.item()\n epoch_test_size += test_batch_size\n\n # for validation/test data, true rxn may not be present!\n batch_idx = batch[2]\n batch_true_ranks_array = self.proposals_data['test'][batch_idx, 2].astype('int')\n batch_true_ranks_valid = batch_true_ranks_array[batch_true_ranks_array < self.args.minibatch_eval]\n batch_true_ranks = torch.as_tensor(batch_true_ranks_array).unsqueeze(dim=-1)\n # slightly tricky as we have to ignore rxns with no 'positive' rxn for loss calculation\n # (bcos nothing in the numerator, loss is undefined)\n loss_numerator = batch_energies[\n np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n batch_true_ranks_valid\n ]\n loss_denominator = batch_energies[\n np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n :\n ]\n batch_loss = (loss_numerator + torch.logsumexp(-loss_denominator, dim=1)).sum().item()\n\n for k in self.k_to_test:\n # index with lowest energy is what the model deems to be the most feasible rxn\n batch_preds = torch.topk(batch_energies, k=k, dim=1, largest=False)[1] \n batch_correct_preds = torch.where(batch_preds == batch_true_ranks)[0].shape[0]\n batch_correct_preds = torch.tensor([batch_correct_preds]).cuda(self.gpu, non_blocking=True)\n dist.all_reduce(batch_correct_preds, dist.ReduceOp.SUM)\n batch_correct_preds = batch_correct_preds.item()\n test_correct_preds[k] += batch_correct_preds\n running_topk_accs[k] = test_correct_preds[k] / epoch_test_size\n\n if k == 1 and self.rank == 0: # overhead is only 5 ms, will check ~5 times each epoch (regardless of batch_size)\n try:\n for j in range(i * self.args.batch_size_eval, (i+1) * self.args.batch_size_eval):\n if j % (self.test_size // 5) == random.randint(0, 3) or j % (self.test_size // 8) == random.randint(0, 5): # peek at a random sample of current batch to monitor training progress\n rxn_idx = random.sample(list(range(self.args.batch_size_eval)), k=1)[0]\n rxn_true_rank = batch_true_ranks_array[rxn_idx]\n rxn_pred_rank = batch_preds[rxn_idx, 0].item()\n rxn_pred_energy = batch_energies[rxn_idx, rxn_pred_rank].item()\n rxn_true_energy = batch_energies[rxn_idx, rxn_true_rank].item() if rxn_true_rank != 9999 else 'NaN'\n rxn_orig_energy = batch_energies[rxn_idx, 0].item()\n rxn_orig_energy2 = batch_energies[rxn_idx, 1].item()\n rxn_orig_energy3 = batch_energies[rxn_idx, 2].item()\n\n rxn_true_prod = self.proposals_data['test'][batch_idx[rxn_idx], 0]\n rxn_true_prec = self.proposals_data['test'][batch_idx[rxn_idx], 1]\n rxn_cand_precs = self.proposals_data['test'][batch_idx[rxn_idx], 3:]\n rxn_pred_prec = rxn_cand_precs[batch_preds[rxn_idx]]\n rxn_orig_prec = rxn_cand_precs[0]\n rxn_orig_prec2 = rxn_cand_precs[1]\n rxn_orig_prec3 = rxn_cand_precs[2]\n logging.info(f'\\ntrue product: \\t\\t\\t\\t{rxn_true_prod}')\n logging.info(f'pred precursor (rank {rxn_pred_rank}, energy = {rxn_pred_energy:+.4f}):\\t\\t\\t{rxn_pred_prec}')\n if rxn_true_energy == 'NaN':\n 
logging.info(f'true precursor (rank {rxn_true_rank}, energy = {rxn_true_energy}):\\t\\t\\t\\t{rxn_true_prec}')\n else:\n logging.info(f'true precursor (rank {rxn_true_rank}, energy = {rxn_true_energy:+.4f}):\\t\\t\\t{rxn_true_prec}')\n logging.info(f'orig precursor (rank 0, energy = {rxn_orig_energy:+.4f}):\\t\\t\\t{rxn_orig_prec}')\n logging.info(f'orig precursor (rank 1, energy = {rxn_orig_energy2:+.4f}):\\t\\t\\t{rxn_orig_prec2}')\n logging.info(f'orig precursor (rank 2, energy = {rxn_orig_energy3:+.4f}):\\t\\t\\t{rxn_orig_prec3}\\n')\n break\n except Exception as e:\n tb_str = traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)\n logging.info(\"\".join(tb_str))\n logging.info('\\nIndex out of range (last minibatch)')\n \n batch_loss = torch.tensor([batch_loss]).cuda(self.gpu, non_blocking=True)\n dist.all_reduce(batch_loss, dist.ReduceOp.SUM)\n batch_loss = batch_loss.item()\n test_loss += batch_loss\n if self.rank == 0:\n test_loader.set_description(f\"testing...loss={test_loss / test_batch_size:.4f}, top-1 acc={running_topk_accs[1]:.4f}, top-5 acc={running_topk_accs[5]:.4f}, top-10 acc={running_topk_accs[10]:.4f}\")\n test_loader.refresh()\n \n for k in self.k_to_test:\n self.test_topk_accs[k] = test_correct_preds[k] / epoch_test_size\n \n dist.barrier()\n message = f\"{self.args.expt_name}\\n\"\n if self.rank == 0:\n logging.info(f'\\nTest loss: {test_loss / epoch_test_size:.4f}')\n for k in self.k_to_test:\n this_topk_message = f'Test top-{k} accuracy: {100 * self.test_topk_accs[k]:.3f}%'\n logging.info(this_topk_message)\n message += this_topk_message + '\\n'\n try:\n send_message(message)\n except Exception as e:\n pass", "def train_eval():\n logging.info('Setting strategy to mirrored strategy...')\n # Synchronous SGD\n strategy = tf.distribute.MirroredStrategy()\n # Make directories if they do not exist yet.\n if FLAGS.checkpoint_dir and not tf.io.gfile.exists(FLAGS.checkpoint_dir):\n logging.info('Making new checkpoint directory: %s', FLAGS.checkpoint_dir)\n tf.io.gfile.makedirs(FLAGS.checkpoint_dir)\n if FLAGS.plot_dir and not tf.io.gfile.exists(FLAGS.plot_dir):\n logging.info('Making new plot directory: %s', FLAGS.plot_dir)\n tf.io.gfile.makedirs(FLAGS.plot_dir)\n\n with strategy.scope():\n logging.info('Getting train step...')\n step = tf.compat.v1.train.get_or_create_global_step()\n smurf = create_smurf()\n if not FLAGS.from_scratch:\n # First restore from init_checkpoint_dir, which is only restored from but\n # not saved to, and then restore from checkpoint_dir if there is already\n # a model there (e.g. 
if the run was stopped and restarted).\n if FLAGS.init_checkpoint_dir:\n logging.info('Initializing model from checkpoint %s.',\n FLAGS.init_checkpoint_dir)\n logging.info('Restoring smurf...')\n smurf.update_checkpoint_dir(FLAGS.init_checkpoint_dir)\n smurf.restore(\n reset_optimizer=FLAGS.reset_optimizer,\n reset_global_step=FLAGS.reset_global_step)\n smurf.update_checkpoint_dir(FLAGS.checkpoint_dir)\n\n if FLAGS.checkpoint_dir:\n logging.info('Restoring model from checkpoint %s.',\n FLAGS.checkpoint_dir)\n smurf.restore()\n else:\n logging.info('Starting from scratch.')\n\n logging.info('Making eval datasets and eval functions.')\n\n if FLAGS.eval_on:\n logging.info('Making eval function...')\n evaluate, _ = smurf_data.make_eval_function(\n FLAGS.eval_on,\n FLAGS.height,\n FLAGS.width,\n progress_bar=True,\n plot_dir=FLAGS.plot_dir,\n num_plots=50)\n\n if FLAGS.train_on:\n # Build training iterator.\n logging.info('Making training iterator.')\n train_dataset = smurf_data.make_train_dataset(\n FLAGS.train_on,\n FLAGS.height,\n FLAGS.width,\n FLAGS.shuffle_buffer_size,\n FLAGS.global_gpu_batch_size,\n FLAGS.seq_len,\n crop_instead_of_resize=FLAGS.crop_instead_of_resize,\n apply_augmentation=True,\n include_ground_truth=('unsupervised' not in FLAGS.train_mode),\n resize_gt_flow=FLAGS.resize_gt_flow_supervision,\n return_full_scale=FLAGS.full_size_warp,\n )\n train_dataset = strategy.experimental_distribute_dataset(train_dataset)\n train_it = iter(train_dataset)\n\n if FLAGS.check_data and FLAGS.plot_dir:\n smurf_plotting.plot_data(train_it, FLAGS.plot_dir, num_plots=100)\n if FLAGS.train_mode in ('supervised', 'supervised-sequence'):\n # Since this is the only loss in this setting, and the Adam optimizer\n # is scale invariant, the actual weight here does not matter for now.\n weights = {'supervision': FLAGS.weight_supervision}\n else:\n # Note that self-supervision loss is added during training.\n weights = {\n 'supervision': FLAGS.weight_supervision,\n 'census': FLAGS.weight_census,\n }\n\n # Switch off loss-terms that have weights < 1e-7.\n weights = {\n k: v for (k, v) in weights.items() if v > 1e-7\n }\n\n def weight_selfsup_fn():\n step = tf.compat.v1.train.get_or_create_global_step()\n # Start self-supervision only after a certain number of steps.\n # Linearly increase self-supervision weight for a number of steps.\n ramp_up_factor = tf.clip_by_value(\n float(step - (FLAGS.selfsup_after_num_steps - 1)) /\n float(max(FLAGS.selfsup_ramp_up_steps, 1)), 0., 1.)\n return FLAGS.weight_selfsup * ramp_up_factor\n\n logging.info('Starting training loop.')\n epoch = 0\n\n while True:\n current_step = tf.compat.v1.train.get_or_create_global_step().numpy()\n\n # Set which occlusion estimation methods could be active at this point.\n # (They will only be used if occlusion_estimation is set accordingly.)\n occ_active = {\n 'brox':\n current_step > FLAGS.occ_after_num_steps_brox,\n 'wang':\n current_step > FLAGS.occ_after_num_steps_wang,\n }\n\n current_weights = {k: v for k, v in weights.items()}\n\n # Prepare self-supervision if it will be used in the next epoch.\n if (FLAGS.weight_selfsup > 1e-7 and current_step +\n FLAGS.epoch_length > FLAGS.selfsup_after_num_steps):\n\n # Add selfsup weight with a ramp-up schedule. 
This will cause a\n # recompilation of the training graph defined in smurf.train(...).\n current_weights['selfsup'] = weight_selfsup_fn\n\n if current_step > FLAGS.smoothness_after_num_steps:\n current_weights['smooth1'] = FLAGS.weight_smooth1\n current_weights['smooth2'] = FLAGS.weight_smooth2\n\n def train_step(inputs):\n weights = {\n k: v() if callable(v) else v for k, v in current_weights.items()\n }\n losses, gradients, variables = smurf.loss_and_grad(\n inputs,\n weights,\n occ_active=occ_active)\n if FLAGS.gradient_clipping:\n gradients = clip_grad_norm(gradients,\n FLAGS.gradient_clipping_max_value)\n smurf.optimizer.apply_gradients(\n zip(gradients, variables))\n return losses\n\n @tf.function\n def distributed_train_step(dist_inputs):\n per_replica_losses = strategy.run(train_step, args=(dist_inputs,))\n output = {}\n for k, v in per_replica_losses.items():\n output[k] = strategy.reduce(tf.distribute.ReduceOp.SUM, v, axis=None)\n output[k] /= FLAGS.num_gpus\n if FLAGS.log_per_replica_values:\n if hasattr(v, 'values'):\n for i, value in enumerate(v.values):\n output[k + str(i)] = value\n return output\n\n # Train for an epoch and save the results.\n log = {}\n log['learning-rate'] = [learning_rate_fn()]\n for step in range(FLAGS.epoch_length):\n sys.stdout.write(f'{step},')\n sys.stdout.flush()\n start_time_data = time.time()\n distributed_inputs = train_it.next()\n stop_time_data = time.time()\n global_step = tf.compat.v1.train.get_or_create_global_step()\n global_step.assign(global_step + 1)\n logging.info('Step is %d', global_step.numpy())\n start_time_train = time.time()\n log_update = distributed_train_step(distributed_inputs)\n stop_time_train = time.time()\n log_update['data-time'] = (stop_time_data - start_time_data) * 1000\n log_update['train-time'] = (stop_time_train - start_time_train) * 1000\n\n for key in log_update:\n if key in log:\n log[key].append(log_update[key])\n else:\n log[key] = [log_update[key]]\n\n if FLAGS.checkpoint_dir and not FLAGS.no_checkpointing:\n smurf.save()\n\n smurf_plotting.print_log(log, epoch)\n if FLAGS.eval_on and FLAGS.evaluate_during_train:\n eval_results = evaluate(smurf)\n smurf_plotting.print_eval(eval_results)\n\n if current_step >= FLAGS.num_train_steps:\n break\n\n epoch += 1\n\n else:\n logging.info(\n 'Specify flag train_on to enable training to <format>:<path>;... 
.')\n logging.info('Just doing evaluation now.')\n eval_results = evaluate(smurf)\n if eval_results:\n logging.info(smurf_plotting.print_eval(eval_results))\n logging.info('Evaluation complete.')", "def _set_eval(self):\n\n if self.model.__dict__['training']:\n self.model.eval()", "def train_and_evaluate(model, train_dataloader, test_dataloader, optimizer, scheduler, loss_fn, total_epochs):\n\n for epoch in range(total_epochs):\n\n # Run one epoch for both train and test\n print(\"Epoch {}/{}\".format(epoch + 1, total_epochs))\n\n # compute number of batches in one epoch(one full pass over the training set)\n train(model, optimizer, loss_fn, train_dataloader, epoch)\n \n scheduler.step()\n\n # Evaluate for one epoch on test set\n eval(model, loss_fn, test_dataloader, epoch)", "def evaluate(X_test, y_test):\n # batch size is 16 for evaluation\n batch_size = 16\n\n # Load Model\n model = load_model('model/model.h5')\n return model.evaluate(X_test, y_test, batch_size, verbose = 1)", "def testing_phase(self):\r\n self.test_dataloader = self.get_dataloader(self.test_h5_path, \"test set\")\r\n self.get_ts_properties()\r\n\r\n self.restart_epoch = util.get_restart_epoch()\r\n print(f\"* Loading model from previous saved state (Epoch {self.restart_epoch}).\", flush=True)\r\n self.model = torch.load(\r\n self.C.job_dir + f\"model_restart_{self.restart_epoch}.pth\"\r\n )\r\n\r\n self.model.eval()\r\n with torch.no_grad():\r\n self.generate_graphs(n_samples=self.C.n_samples)\r\n\r\n print(\"* Evaluating model.\", flush=True)\r\n anal.evaluate_model(valid_dataloader=self.test_dataloader,\r\n train_dataloader=self.train_dataloader,\r\n nll_per_action=self.nll_per_action,\r\n model=self.model)\r\n\r\n self.print_time_elapsed()", "def _evaluate(self):\n logging.warning('-> evaluate EMPTY experiment...')", "def test(self):\n with torch.no_grad():\n self.forward()\n self.compute_visuals()", "def evaluate_model(model, ds_valid):\n print(\"-- Evaluate Model:\")\n for features, labels in ds_valid:\n valid_step(model, features, labels)\n logs = \"\\nValid Loss: {}, Valid Accuracy: {}\"\n tf.print(tf.strings.format(logs, (valid_loss.result(), valid_metric.result())))\n valid_loss.reset_states()\n train_metric.reset_states()\n valid_metric.reset_states()", "def test(self, dataset):\n model_path = os.path.join(self.check_point, 'model.pt')\n if not os.path.exists(model_path):\n raise Exception('Cannot find %s.' 
% model_path)\n\n self.model = torch.load(model_path)\n print(self.model)\n model_parameters = filter(lambda p: p.requires_grad, self.model.parameters())\n params = sum([np.prod(p.size()) for p in model_parameters])\n print(1.0 * params / (1000 * 1000))\n _, _, stats, outputs, names = self._check_PSNR(dataset, is_test=True)\n return stats, outputs, names", "def evaluate(args, model, tokenizer, eval_dataset, eval_dataloader, task_name, model_type, split, step):\n model.eval()\n processor = MoralStoriesProcessor()\n results = dict()\n softmax = torch.nn.Softmax(dim=1)\n\n # Eval!\n logger.info('***** Running evaluation on the validation / test set *****')\n logger.info(' Num examples = %d', len(eval_dataset))\n logger.info(' Batch size = %d', args.eval_batch_size)\n batch_losses = list()\n eval_loss = 0.0\n micro_loss, macro_loss = 0.0, 0.0\n num_batches, num_tokens = 0, 0\n preds = None\n soft_preds = None\n out_label_ids = None\n # Perform a single evaluation step\n for batch in tqdm(eval_dataloader, desc='Evaluating', mininterval=10, ncols=100):\n batch = tuple(t.to(args.device) for t in batch)\n with torch.no_grad():\n if 'gen' not in task_name:\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'token_type_ids': batch[2] if model_type == 'bert' else None,\n 'labels': batch[3]}\n else:\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'labels': batch[3]}\n if 'gpt2' not in model_type:\n # Prepare decoder inputs and labels for enc-dec models\n inputs['labels'] = batch[3][:, 1:].contiguous() # shift\n decoder_input_ids = batch[3][:, :-1].clone() # shift\n decoder_input_ids[decoder_input_ids == -100] = tokenizer.pad_token_id # remove masking\n inputs['decoder_input_ids'] = decoder_input_ids.contiguous()\n\n outputs = model(**inputs)\n\n tmp_eval_loss, logits = outputs[:2]\n soft_logits = softmax(logits)\n eval_loss += tmp_eval_loss.mean().item()\n batch_losses.append(tmp_eval_loss.item())\n\n if 'gen' not in task_name:\n if preds is None:\n preds = logits.detach().cpu().numpy()\n soft_preds = soft_logits.detach().cpu().numpy()\n out_label_ids = inputs['labels'].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n soft_preds = np.append(soft_preds, soft_logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)\n else:\n # Obtain per-token loss for perplexity computation\n batch_loss = get_token_loss(args, logits, batch[3], batch[4], model_type=model_type)\n macro_loss += batch_loss.mean().item()\n micro_loss += batch_loss.sum().item()\n num_batches += 1\n num_tokens += batch_loss.view(-1).shape[0]\n\n # Compute and update evaluation metric values\n if 'gen' not in task_name:\n # Isolate model predictions\n preds = np.argmax(preds, axis=1)\n soft_preds = soft_preds.tolist()\n curr_result = compute_cls_metrics(preds, out_label_ids)\n else:\n macro_perplexity = torch.exp(torch.tensor(macro_loss / num_batches)).item()\n micro_perplexity = torch.exp(torch.tensor(micro_loss / num_tokens)).item()\n curr_result = {'macro_perplexity': macro_perplexity,\n 'micro_perplexity': micro_perplexity}\n\n if len(results.keys()) == 0:\n for k, v in curr_result.items():\n results[k] = [v]\n else:\n for k, v in curr_result.items():\n results[k].append(v)\n\n # Log metrics\n output_eval_file = os.path.join(args.output_dir, 'results_{}_{}.txt'.format(task_name, split))\n with open(output_eval_file, 'a') as writer:\n logger.info('***** Eval results *****')\n 
writer.write('STEP: {:s}\\n'.format(str(step)))\n for key in sorted(curr_result.keys()):\n logger.info(' %s = %s', key, str(curr_result[key]))\n writer.write('%s = %s\\n' % (key, str(curr_result[key])))\n\n # Log predictions\n if 'gen' not in task_name:\n output_pred_file = \\\n os.path.join(args.output_dir, 'predictions_{}_{}_{}.lst'.format(task_name, split, step))\n with open(output_pred_file, 'w') as writer:\n logger.info('***** Write predictions *****')\n for pred in preds:\n writer.write('{}\\n'.format(processor.get_labels()[pred]))\n\n # Maintain a single metrics file\n if os.path.exists(args.output_dir):\n with open(os.path.join(args.output_dir, 'metrics_{}_{}.json'.format(task_name, split)), 'w') as f:\n f.write(json.dumps(results))\n f.close()\n\n # Report mean dev loss\n mean_eval_loss = eval_loss / len(eval_dataloader)\n logging.info('\\n' + '*' * 10)\n logging.info('Mean development loss: {:.4f}'.format(mean_eval_loss))\n logging.info('*' * 10 + '\\n')\n\n return results, mean_eval_loss, preds, soft_preds", "def test_evaluate(self):\n tf.reset_default_graph()\n if os.path.isdir(self.training_dir):\n shutil.rmtree(self.training_dir)\n\n nas_trainer = EarlyStopNASTrainer(\n encoded_network=self.net_nsc,\n input_shape=infer_data_shape(self.train_data),\n n_classes=infer_n_classes(self.train_labels),\n batch_size=self.batch_size,\n log_path=self.training_dir,\n mu=0.5,\n rho=0.5,\n variable_scope=\"cnn\"\n )\n\n nas_trainer.train(\n train_data=self.train_data,\n train_labels=self.train_labels,\n train_input_fn=\"default\"\n )\n\n res = nas_trainer.evaluate(\n eval_data=self.eval_data,\n eval_labels=self.eval_labels,\n eval_input_fn=\"default\"\n )\n\n self.assertTrue(os.path.isdir(self.training_dir))\n self.assertTrue(\"accuracy\" in list(res.keys()))\n\n self.assertTrue(nas_trainer.density is not None)\n self.assertTrue(nas_trainer.density != 0.)\n\n self.assertTrue(nas_trainer.weighted_log_density is not None)\n self.assertTrue(nas_trainer.weighted_log_density != 0.)\n\n self.assertTrue(nas_trainer.flops is not None)\n self.assertTrue(nas_trainer.flops != 0.)\n\n self.assertTrue(nas_trainer.weighted_log_flops is not None)\n self.assertTrue(nas_trainer.weighted_log_flops != 0.)", "def evaluate(cfg: DictConfig):\n\n # suppress TensorFlow and DALI warnings\n suppress_warnings()\n\n if cfg.USE_MULTI_GPUS.VALUE:\n # change number of visible gpus for evaluation\n set_gpus(cfg.USE_MULTI_GPUS.GPU_IDS)\n # update batch size according to available gpus\n data_generator.update_batch_size(cfg)\n\n if cfg.OPTIMIZATION.AMP:\n print(\"Enabling Automatic Mixed Precision(AMP) training\")\n policy = mixed_precision.Policy('mixed_float16')\n mixed_precision.set_global_policy(policy)\n\n if cfg.OPTIMIZATION.XLA:\n print(\"Enabling Automatic Mixed Precision(XLA) training\")\n tf.config.optimizer.set_jit(True)\n\n # create model\n strategy = None\n if cfg.USE_MULTI_GPUS.VALUE:\n # multi gpu training using tensorflow mirrored strategy\n strategy = tf.distribute.MirroredStrategy(\n cross_device_ops=tf.distribute.HierarchicalCopyAllReduce()\n )\n print('Number of visible gpu devices: {}'.format(strategy.num_replicas_in_sync))\n with strategy.scope():\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE\n ) # optimizer\n if cfg.OPTIMIZATION.AMP:\n optimizer = mixed_precision.LossScaleOptimizer(\n optimizer,\n dynamic=True\n )\n dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES)\n dice_coef = 
tf.keras.metrics.MeanMetricWrapper(name=\"dice_coef\", fn=dice_coef)\n model = prepare_model(cfg, training=True)\n else:\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE\n ) # optimizer\n if cfg.OPTIMIZATION.AMP:\n optimizer = mixed_precision.LossScaleOptimizer(\n optimizer,\n dynamic=True\n )\n dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES)\n dice_coef = tf.keras.metrics.MeanMetricWrapper(name=\"dice_coef\", fn=dice_coef)\n model = prepare_model(cfg, training=True)\n\n model.compile(\n optimizer=optimizer,\n loss=unet3p_hybrid_loss,\n metrics=[dice_coef],\n )\n\n # weights model path\n checkpoint_path = join_paths(\n cfg.WORK_DIR,\n cfg.CALLBACKS.MODEL_CHECKPOINT.PATH,\n f\"{cfg.MODEL.WEIGHTS_FILE_NAME}.hdf5\"\n )\n\n assert os.path.exists(checkpoint_path), \\\n f\"Model weight's file does not exist at \\n{checkpoint_path}\"\n\n # TODO: verify without augment it produces same results\n # load model weights\n model.load_weights(checkpoint_path, by_name=True, skip_mismatch=True)\n model.summary()\n\n # data generators\n val_generator = data_generator.get_data_generator(cfg, \"VAL\", strategy)\n validation_steps = data_generator.get_iterations(cfg, mode=\"VAL\")\n\n # evaluation metric\n evaluation_metric = \"dice_coef\"\n if len(model.outputs) > 1:\n evaluation_metric = f\"{model.output_names[0]}_dice_coef\"\n\n result = model.evaluate(\n x=val_generator,\n steps=validation_steps,\n workers=cfg.DATALOADER_WORKERS,\n return_dict=True,\n )\n\n # return computed loss, validation accuracy, and it's metric name\n return result, evaluation_metric", "def test(self):\n self.eval()", "def evaluate(\n self,\n test_dataset: Union[Dataset, InstanceDataset],\n batch_size: int = 16,\n lazy: bool = False,\n output_dir: Optional[Union[str, Path]] = None,\n verbose: bool = True,\n ) -> Dict[str, Any]:\n trainer = Trainer(self, lazy=lazy)\n\n return trainer.test(\n test_dataset, batch_size=batch_size, output_dir=output_dir, verbose=verbose\n )", "def test(exp_dir, step, model, iterator):\n xin, xout, y, n = model.xin, model.xout, model.y, model.n\n sum_names = [\"lb\", \"log_px_z\", \"neg_kld_z1\", \"neg_kld_z2\", \"log_pmu2\"]\n sum_vars = map(tf.reduce_mean, [model.__dict__[name]\n for name in sum_names])\n\n # util fn\n def _feed_dict(x_val, y_val, n_val):\n return {xin: x_val, xout: x_val, y: y_val, n: n_val}\n\n saver = tf.train.Saver()\n with tf.Session(config=SESS_CONF) as sess:\n stime = time.time()\n restore_model(sess, saver, \"%s/models\" % exp_dir, step)\n print \"restore model takes %.2f s\" % (time.time() - stime)\n\n def _print_prog(sum_names, sum_vals):\n msg = \" \".join([\"%s=%.2f\" % p for p in zip(sum_names, sum_vals)])\n print msg\n if np.isnan(sum_vals[0]):\n if_diverged = True\n sys.stdout.flush()\n\n print(\"#\" * 40)\n vtime = time.time()\n sum_vals = _valid(sess, model, sum_vars, iterator)\n now = time.time()\n print(\"validation takes %.fs\" % (now - vtime,))\n _print_prog(sum_names, sum_vals)\n print(\"#\" * 40)", "def train_step(self, x_train, y_train):\n\n input_x_op = self.session.graph.get_operation_by_name(\"input_x\").outputs[0]\n input_y_op = self.session.graph.get_operation_by_name(\"input_y\").outputs[0]\n dropout_keep_prob_op = self.session.graph.get_operation_by_name(\"dropout_keep_prob\").outputs[0]\n global_step_op = self.session.graph.get_operation_by_name(\"global_step\").outputs[0]\n\n optimizer_op = self.session.graph.get_operation_by_name(\"loss/optimizer\").outputs[0]\n loss_op = 
self.session.graph.get_operation_by_name(\"loss/loss\").outputs[0]\n\n d_ = {\n input_x_op: x_train,\n input_y_op: y_train\n }\n\n self.init_dataset(d_)\n\n train_batches_per_epoch = (len(x_train) - 1) // self.FLAGS.batch_size + 1\n\n sum_loss = 0\n for current_step in range (train_batches_per_epoch):\n\n if self.FLAGS.summary:\n _, step, summaries, loss = self.session.run(\n [optimizer_op, global_step_op, self.train_summary_op, loss_op], feed_dict={dropout_keep_prob_op: self.hyperparams['dropout_keep_prob']})\n \n self.train_summary_writer.add_summary(summaries, step)\n else:\n _, step, loss = self.session.run(\n [optimizer_op, global_step_op, loss_op], feed_dict={dropout_keep_prob_op: self.hyperparams['dropout_keep_prob']})\n \n sum_loss += loss\n\n time_str = datetime.datetime.now().isoformat()\n if (current_step + 1) % 10 == 0:\n print(\"{}: step {}/{}, loss {:g}\".format(time_str, current_step + 1, train_batches_per_epoch, loss))\n\n mean_loss = sum_loss/ train_batches_per_epoch\n\n return mean_loss", "def evaluate(data_folder):\n seq_length=150\n g = tf.Graph()\n with g.as_default():\n # Load dataset.\n provider = data_provider_kush.get_provider(FLAGS.task)(data_folder)\n num_classes = 6\n audio, ground_truth, num_examples = provider.get_split(FLAGS.portion, FLAGS.batch_size)\n\n # Define model graph.\n with slim.arg_scope([slim.batch_norm],\n is_training=False):\n predictions = models_kush.get_model(FLAGS.model,ground_truth)(audio, num_lstm_modules=FLAGS.num_lstm_modules)\n #pred_argmax = tf.argmax(predictions, 1) \n #lab_argmax = tf.argmax(labels, 1)\n\t\n metrics = {\n \"eval/accuracy\": slim.metrics.streaming_mean_squared_error(predictions, ground_truth[:,seq_length-1,:])\n }\n\n total_error = tf.reduce_sum(tf.square(tf.subtract(ground_truth[:,seq_length-1,:], tf.reduce_mean(ground_truth[:,seq_length-1,:]))))\n unexplained_error = tf.reduce_sum(tf.square(tf.subtract(ground_truth[:,seq_length-1,:], predictions)))\n R_squared = tf.subtract(tf.cast(1, tf.float32), tf.divide(total_error, unexplained_error))\n print('R_squared value: ',R_squared)\n for i in range(num_classes):\n name ='eval/mse_{}'.format(i)\n recall = slim.metrics.streaming_mean_squared_error(predictions[:,i],ground_truth[:,seq_length-1,i])\n metrics[name] = recall\n\n metrics['R_squared']=(R_squared,tf.subtract(tf.cast(1, tf.float32), tf.div(total_error, unexplained_error)))\n #print(zip(metrics.values()))\n #metric_names = metrics.keys()\n #value_ops, update_ops = zip(*metrics.values())\n #names_to_values, names_to_updates = dict(zip(metric_names, value_ops)), dict(zip(metric_names, update_ops))\n names_to_values, names_to_updates = slim.metrics.aggregate_metric_map(metrics)\n \n summary_ops = []\n metrics = dict()\n for name, value in names_to_values.items():\n op = tf.summary.scalar(name, value)\n op = tf.Print(op, [value], name)\n summary_ops.append(op)\n metrics[name] = value\n\n # Computing the unweighted average recall and add it into the summaries.\n uar = sum([metrics['eval/mse_{}'.format(i)] for i in range(num_classes)]) / num_classes\n op = tf.summary.scalar('eval/mse', uar)\n op = tf.Print(op, [uar], 'eval/mse')\n summary_ops.append(op)\n\n num_examples = FLAGS.num_examples or num_examples\n num_batches = math.ceil(num_examples / float(FLAGS.batch_size))\n logging.set_verbosity(1)\n\n # Setup the global step.\n slim.get_or_create_global_step()\n\n # How often to run the evaluation.\n eval_interval_secs = FLAGS.eval_interval_secs \n\n slim.evaluation.evaluation_loop(\n '',\n FLAGS.checkpoint_dir,\n 
FLAGS.log_dir,\n num_evals=num_batches,\n eval_op=list(names_to_updates.values()),\n summary_op=tf.summary.merge(summary_ops),\n eval_interval_secs=eval_interval_secs)", "def run_evaluation(\n self,\n training_set,\n validation_set,\n test_set,\n progress_tracker: ProgressTracker,\n train_summary_writer,\n validation_summary_writer,\n test_summary_writer,\n model_hyperparameters_path,\n output_features,\n metrics_names,\n save_path,\n loss: torch.Tensor,\n all_losses: Dict[str, torch.Tensor],\n early_stopping_steps: int,\n checkpoint_manager: CheckpointManager,\n ) -> bool:\n start_time = time.time()\n self.callback(lambda c: c.on_eval_start(self, progress_tracker, save_path))\n\n progress_tracker.checkpoint_number += 1\n if self.is_coordinator():\n logger.info(f\"\\nRunning evaluation for step: {progress_tracker.steps}, epoch: {progress_tracker.epoch}\")\n\n # ================ Eval ================\n # eval metrics on train\n self.eval_batch_size = max(self.eval_batch_size, progress_tracker.batch_size)\n\n if self.evaluate_training_set:\n # Run a separate pass over the training data to compute metrics\n self.evaluation(\n training_set, \"train\", progress_tracker.train_metrics, self.eval_batch_size, progress_tracker\n )\n else:\n # Use metrics accumulated during training\n metrics = self.model.get_metrics()\n append_metrics(self.model, \"train\", metrics, progress_tracker.train_metrics, progress_tracker)\n self.model.reset_metrics()\n\n self.write_eval_summary(\n summary_writer=train_summary_writer,\n metrics=progress_tracker.train_metrics,\n step=progress_tracker.steps,\n )\n\n if validation_set is not None:\n self.callback(lambda c: c.on_validation_start(self, progress_tracker, save_path))\n\n # eval metrics on validation set\n self.evaluation(\n validation_set,\n VALIDATION,\n progress_tracker.validation_metrics,\n self.eval_batch_size,\n progress_tracker,\n )\n\n self.write_eval_summary(\n summary_writer=validation_summary_writer,\n metrics=progress_tracker.validation_metrics,\n step=progress_tracker.steps,\n )\n\n self.callback(lambda c: c.on_validation_end(self, progress_tracker, save_path))\n\n if test_set is not None:\n self.callback(lambda c: c.on_test_start(self, progress_tracker, save_path))\n\n # eval metrics on test set\n self.evaluation(test_set, TEST, progress_tracker.test_metrics, self.eval_batch_size, progress_tracker)\n\n self.write_eval_summary(\n summary_writer=test_summary_writer,\n metrics=progress_tracker.test_metrics,\n step=progress_tracker.steps,\n )\n\n self.callback(lambda c: c.on_test_end(self, progress_tracker, save_path))\n\n elapsed_time = (time.time() - start_time) * 1000.0\n\n if self.is_coordinator():\n logger.info(f\"Evaluation took {time_utils.strdelta(elapsed_time)}\\n\")\n print_metrics_table(\n output_features,\n progress_tracker.train_metrics,\n progress_tracker.validation_metrics,\n progress_tracker.test_metrics,\n )\n\n # ================ Validation Logic ================\n should_break = False\n if validation_set is not None and validation_set.size > 0:\n should_break = self.check_progress_on_validation(\n progress_tracker,\n self.validation_field,\n self.validation_metric,\n save_path,\n model_hyperparameters_path,\n self.increase_batch_size_on_plateau,\n self.increase_batch_size_on_plateau_patience,\n self.increase_batch_size_on_plateau_rate,\n self.max_batch_size,\n self.increase_batch_size_eval_metric,\n self.increase_batch_size_eval_split,\n early_stopping_steps,\n self.skip_save_model,\n checkpoint_manager,\n )\n else:\n # There's no 
validation, so we save the model.\n if not self.skip_save_model:\n logger.info(\"Saving model.\\n\")\n checkpoint_manager.save_best(progress_tracker.steps)\n self.callback(lambda c: c.on_save_best_checkpoint(self, progress_tracker, save_path))\n\n # Trigger eval end callback after any model weights save for complete checkpoint\n self.callback(lambda c: c.on_eval_end(self, progress_tracker, save_path))\n\n # Clear the CUDA cache to free up memory\n torch.cuda.empty_cache()\n\n return should_break", "def evaluate(self, dataset):\n return self.model.evaluate(dataset.X_val, dataset.y_val)", "def evaluate(model, val_data, epoch):\n print('validating')\n\n # 设置为评估模式 \n model.eval() \n\n val_loss = []\n with torch.no_grad():\n DEVICE = config.DEVICE\n\n val_dataloader = DataLoader(dataset=val_data,\n batch_size=config.batch_size,\n shuffle=True,\n pin_memory=True, drop_last=True,\n collate_fn=collate_fn)\n\n for batch, data in enumerate(tqdm(val_dataloader)):\n\n x, y, x_len, y_len, oov, len_oovs = data\n\n if config.is_cuda:\n x = x.to(DEVICE)\n y = y.to(DEVICE)\n x_len = x_len.to(DEVICE)\n len_oovs = len_oovs.to(DEVICE)\n\n loss = model(x, x_len, y, len_oovs, batch=batch, \n num_batches=len(val_dataloader),\n teacher_forcing=True)\n\n val_loss.append(loss.item())\n\n return np.mean(val_loss)", "def eval_fn(dummy_state, batch):\n model.forward_pass(batch, training=False)\n return dummy_state", "def train_step(self, sess, task_a_data):\n raise NotImplemented()", "def test_eval(model, test_set):\n num_test_batch = len(test_set)\n test_loss = np.zeros((num_test_batch, 1), dtype=float)\n test_acc = np.zeros((num_test_batch, 1), dtype=float)\n for ibatch, batch in enumerate(test_set):\n result = model.test_on_batch({'input':batch[0]}, {'fp1':batch[1], 'fp2':batch[1], 'fp3':batch[1], 'ave':batch[1]})\n test_loss[ibatch] = result[0]\n test_acc[ibatch] = result[-1]\n return np.mean(test_loss), np.mean(test_acc)", "def E_step_precompute(self, model_params, my_suff_stat, my_data):", "def run_evaluation(params):\n\n old_stdout = sys.stdout\n log_file = open(params.output,'w')\n sys.stdout = log_file\n\n batch_size = params.batch_size\n num_particles = params.num_particles\n trajlen = params.trajlen\n num_batches = dataset_size() // batch_size\n\n # evaluation data\n test_ds = datautils.get_dataflow(params.testfiles, params.batch_size, is_training=False)\n\n # pf model\n model = pfnet.pfnet_model(params)\n\n # load model from checkpoint file\n if params.load:\n print(\"=====> Loading model from \" + params.load)\n model.load_weights(params.load)\n\n # repeat for a fixed number of epochs\n for epoch in range(params.epochs):\n mse_list = []\n success_list = []\n itr = test_ds.as_numpy_iterator()\n # run evaluation over all evaluation samples in an epoch\n for idx in tqdm(range(num_batches)):\n raw_record = next(itr)\n data_sample = datautils.transform_raw_record(raw_record, params)\n\n observations = tf.convert_to_tensor(data_sample['observation'], dtype=tf.float32)\n odometry = tf.convert_to_tensor(data_sample['odometry'], dtype=tf.float32)\n true_states = tf.convert_to_tensor(data_sample['true_states'], dtype=tf.float32)\n global_map = tf.convert_to_tensor(data_sample['global_map'], dtype=tf.float32)\n init_particles = tf.convert_to_tensor(data_sample['init_particles'], dtype=tf.float32)\n init_particle_weights = tf.constant(np.log(1.0/float(num_particles)),\n shape=(batch_size, num_particles), dtype=tf.float32)\n\n # start trajectory with initial particles and weights\n state = [init_particles, 
init_particle_weights, global_map]\n\n # if stateful: reset RNN s.t. initial_state is set to initial particles and weights\n # if non-stateful: pass the state explicity every step\n if params.stateful:\n model.layers[-1].reset_states(state) # RNN layer\n\n input = [observations, odometry]\n model_input = (input, state)\n\n # forward pass\n output, state = model(model_input, training=False)\n\n # compute loss\n particle_states, particle_weights = output\n loss_dict = pfnet_loss.compute_loss(particle_states, particle_weights, true_states, params.map_pixel_in_meters)\n\n # we have squared differences along the trajectory\n mse = np.mean(loss_dict['coords'])\n mse_list.append(mse)\n\n # localization is successfull if the rmse error is below 1m for the last 25% of the trajectory\n successful = np.all(loss_dict['coords'][-trajlen//4:] < 1.0 ** 2) # below 1 meter\n success_list.append(successful)\n\n # report results\n mean_rmse = np.mean(np.sqrt(mse_list)) * 100\n total_rmse = np.sqrt(np.mean(mse_list)) * 100\n mean_success = np.mean(np.array(success_list, 'i')) * 100\n print(f'Mean RMSE (average RMSE per trajectory) = {mean_rmse:03.3f} cm')\n print(f'Overall RMSE (reported value) = {total_rmse:03.3f} cm')\n print(f'Success rate = {mean_success:03.3f} %')\n\n sys.stdout = old_stdout\n log_file.close()\n print('evaluation finished')", "def evaluate_dataset(self, net, criterion, data_loader):\n is_training = net.training # Remember if the network is training or not\n\n net.eval() # Set to eval mode\n\n running_loss = 0.\n counter = 0\n print('Make sure this is set correctly')\n decode_type = 'single_frame' # Always use a decode_type of 'single_frame'\n use_single_frame = decode_type == 'single_frame'\n decode_type = self.get_decode_type(decode_type)\n last_batch = None\n localized = False # For graph net eval\n last_predicted_node_name = None # For graph net eval\n last_predicted_behavior_id = None # For graph net eval\n if isinstance(net, PhaseRNN):\n output = torch.tensor([[0.]], dtype=torch.float32)\n with torch.no_grad():\n for i, batch in enumerate(data_loader):\n raw_batch = batch # Only used for GraphNet\n vel, depth = decode_batch(batch, decode_type, self.cfg)\n\n # Sanity check\n if not isinstance(net, GraphNet):\n # Check that batch size is 1\n batch_size = vel.size(0) # Compute batch size\n assert batch_size == 1\n\n # Check if this is the beginning of a new episode\n if isinstance(net, PhaseRNN):\n is_new_episode = (Evaluator.is_new_episode(last_batch, batch)\n or self.is_new_episode(output))\n elif isinstance(net, GraphNet):\n if not isinstance(batch, dict): # Graph info provided\n has_graph_info = True\n batch = vel[1]['batch']\n else:\n has_graph_info = False\n # While we have not localized the agent in this episode yet, keep the\n # is_new_episode flag to True. 
Note that localized could be set to False from a\n # previous iteration.\n is_new_episode = self.is_new_episode(last_batch, batch) or not localized\n else:\n is_new_episode = self.is_new_episode(last_batch, batch)\n\n if is_new_episode:\n localized = False # For graph net eval\n last_predicted_node_name = None # For graph net eval\n last_predicted_behavior_id = None # For graph net eval\n\n # Update\n last_batch = batch\n\n # If GraphNet, check if localized (AKA if graph info has been provided)\n if isinstance(net, GraphNet) and (has_graph_info is True):\n localized = True # For graph net eval\n\n if isinstance(net, GraphNet) and (localized is True):\n # Agent has been localized\n # Set up the input to the network (combine it all into the depth variable)\n if (is_new_episode is True) or True and (vel is not None):\n cur_area_graph = self.sem_graphs[batch['area_name'][0]]\n initial_graph_net_input, _ = vel\n depth = {\n 'depth': depth,\n 'graph_net_input': initial_graph_net_input,\n }\n else:\n # We are in the middle of a rollout for this episode. Provide the subgraph\n # based on the previous localization prediction.\n\n cur_graph_net_input = self.construct_graph_net_input(\n self.cfg,\n cur_area_graph,\n last_predicted_node_name,\n last_predicted_behavior_id,\n decode_type,\n batch,\n )\n\n depth = {\n 'depth': depth,\n 'graph_net_input': cur_graph_net_input,\n }\n\n # Start evaluating the sequence/episode once we have found the starting node/position\n if isinstance(net, GraphNet) and (localized is False):\n continue\n\n # Sanity check\n if isinstance(net, GraphNet) and (localized is False):\n assert torch.unique(depth['graph_net_input']['graph_idx_of_node']) == 0\n assert torch.unique(depth['graph_net_input']['graph_idx_of_edge']) == 0\n\n output = self.predict(net, depth, is_new_episode)\n if isinstance(net, GraphNet):\n # Decode the output of GraphNetEvaluator.predict() and update the predicted location\n output, last_predicted_node_name, last_predicted_behavior_id = output\n\n if has_graph_info is False:\n # We cannot compare with GT since no ground truth is provided\n continue\n\n # Use a different criterion (from training) for evaluating the GraphNet\n # Ignore the input criterion and measure accuracy instead\n gt_graph_net_dict, target_output = vel\n node_names_of_gt_subgraph = gt_graph_net_dict['node_names']\n edge_categories_of_gt_subgraph = gt_graph_net_dict['edge_categories']\n assert len(target_output['gt_node_idx']) == 1\n assert len(target_output['gt_edge_idx']) == 1\n gt_node_name = node_names_of_gt_subgraph[target_output['gt_node_idx'][0]]\n gt_behavior_category_enum = BehaviorCategory(int(edge_categories_of_gt_subgraph[target_output['gt_edge_idx'][0]]))\n gt_behavior_id = gt_behavior_category_enum.name\n\n assert self.cfg.gn_classification == 'edge'\n loss = (gt_node_name == last_predicted_node_name) and (gt_behavior_id == last_predicted_behavior_id)\n loss = float(loss)\n else:\n loss = criterion(output, vel)\n\n # Update counters\n counter += 1\n if isinstance(loss, float):\n running_loss += loss\n else:\n running_loss += loss.item()\n\n if (i + 1) % self.cfg.print_freq == 0:\n print(' evaluated %d iterations: %f' % (i + 1, running_loss / counter))\n\n # Display the prediction\n if self.cfg.visualize_results is True:\n # Modify dataset_item to include the prediction\n if isinstance(net, GraphNet):\n prediction_as_str = self.prediction2str(output, last_predicted_node_name, last_predicted_behavior_id)\n else:\n prediction_as_str = self.prediction2str(output)\n\n if 
isinstance(net, GraphNet) and (has_graph_info is True): # Make sure to visualize graph info if provided\n dataset_item = raw_batch\n dataset_item[0]['prediction_str'] = prediction_as_str\n else:\n dataset_item = batch\n dataset_item['prediction_str'] = prediction_as_str\n\n # Visualize\n to_break = self.dataset_visualizer.visualize_data_loader_item(dataset_item, use_frame_by_frame=use_single_frame)\n if to_break:\n break\n\n if is_training:\n net.train() # Set to train mode\n return running_loss / counter", "def evaluate():\n log.info('Loading dev data...')\n if args.version_2:\n dev_data = SQuAD('dev', version='2.0')\n else:\n dev_data = SQuAD('dev', version='1.1')\n (_, _), (data_file_name, _) \\\n = dev_data._data_file[dev_data._version][dev_data._segment]\n dev_data_path = os.path.join(dev_data._root, data_file_name)\n\n if args.debug:\n sampled_data = [dev_data[0], dev_data[1], dev_data[2]]\n dev_data = mx.gluon.data.SimpleDataset(sampled_data)\n log.info('Number of records in dev data: %d', len(dev_data))\n\n dev_data_features = preprocess_dataset(\n tokenizer, dev_data, vocab=vocab, max_seq_length=args.max_seq_length,\n doc_stride=args.doc_stride, num_workers=args.num_workers,\n max_query_length=args.max_query_length, load_from_pickle=args.load_pickle,\n feature_file=args.dev_dataset_file)\n\n dev_data_input = convert_full_features_to_input_features(dev_data_features)\n log.info('The number of examples after preprocessing: %d', len(dev_data_input))\n\n dev_dataloader = mx.gluon.data.DataLoader(dev_data_input, batchify_fn=batchify_fn,\n num_workers=4, batch_size=args.test_batch_size,\n shuffle=False, last_batch='keep')\n\n log.info('start prediction')\n\n all_results = collections.defaultdict(list)\n\n epoch_tic = time.time()\n total_num = 0\n for (batch_id, data) in enumerate(dev_dataloader):\n data_list = list(split_and_load(data, ctx))\n for splited_data in data_list:\n example_ids, inputs, token_types, valid_length, p_mask, _, _, _ = splited_data\n total_num += len(inputs)\n outputs = net_eval(inputs, token_types, valid_length, p_mask=p_mask)\n example_ids = example_ids.asnumpy().tolist()\n for c, example_ids in enumerate(example_ids):\n result = RawResultExtended(start_top_log_probs=outputs[0][c].asnumpy().tolist(),\n start_top_index=outputs[1][c].asnumpy().tolist(),\n end_top_log_probs=outputs[2][c].asnumpy().tolist(),\n end_top_index=outputs[3][c].asnumpy().tolist(),\n cls_logits=outputs[4][c].asnumpy().tolist())\n all_results[example_ids].append(result)\n if batch_id % args.log_interval == 0:\n log.info('Batch: %d/%d', batch_id + 1, len(dev_dataloader))\n\n epoch_toc = time.time()\n log.info('Time cost=%2f s, Thoughput=%.2f samples/s', epoch_toc - epoch_tic,\n total_num / (epoch_toc - epoch_tic))\n\n log.info('Get prediction results...')\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n for features in dev_data_features:\n results = all_results[features[0].example_id]\n example_qas_id = features[0].qas_id\n score_diff, best_non_null_entry, nbest_json = predict_extended(\n features=features, results=results, n_best_size=args.n_best_size,\n max_answer_length=args.max_answer_length, start_n_top=args.start_top_n,\n end_n_top=args.end_top_n)\n scores_diff_json[example_qas_id] = score_diff\n all_predictions[example_qas_id] = best_non_null_entry\n all_nbest_json[example_qas_id] = nbest_json\n\n output_prediction_file = os.path.join(args.output_dir, 'predictions.json')\n output_nbest_file = 
os.path.join(args.output_dir, 'nbest_predictions.json')\n output_null_log_odds_file = os.path.join(args.output_dir, 'null_odds.json')\n\n with open(output_prediction_file, 'w') as writer:\n writer.write(json.dumps(all_predictions, indent=4) + '\\n')\n with open(output_nbest_file, 'w') as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + '\\n')\n with open(output_null_log_odds_file, 'w') as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + '\\n')\n\n if os.path.exists(sys.path[0] + '/evaluate-v2.0.py'):\n arguments = [\n dev_data_path, output_prediction_file, '--na-prob-thresh',\n str(args.null_score_diff_threshold)\n ]\n if args.version_2:\n arguments += ['--na-prob-file', output_null_log_odds_file]\n subprocess.call([sys.executable, sys.path[0] + '/evaluate-v2.0.py'] + arguments)\n else:\n log.info('Please download evaluate-v2.0.py to get evaluation results for SQuAD. '\n 'Check index.rst for the detail.')", "def test_evaluate(self):\n tf.reset_default_graph()\n if os.path.isdir(self.training_dir):\n shutil.rmtree(self.training_dir)\n\n nas_trainer = DefaultNASTrainer(\n encoded_network=self.net_nsc,\n input_shape=infer_data_shape(self.train_data),\n n_classes=infer_n_classes(self.train_labels),\n batch_size=self.batch_size,\n log_path=self.training_dir,\n variable_scope=\"cnn\"\n )\n\n nas_trainer.train(\n train_data=self.train_data,\n train_labels=self.train_labels,\n train_input_fn=\"default\"\n )\n\n res = nas_trainer.evaluate(\n eval_data=self.eval_data,\n eval_labels=self.eval_labels,\n eval_input_fn=\"default\"\n )\n\n self.assertTrue(os.path.isdir(self.training_dir))\n self.assertTrue(\"accuracy\" in list(res.keys()))", "def evaluate(self, x_test, y_test, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n\n test_results = self.model.evaluate(x_test,\n y_test,\n batch_size=self.batch_size,\n verbose=verbose)\n self.val_history = test_results\n return test_results", "def evaluate(self, x_test, y_test, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n\n test_results = self.model.evaluate(x_test,\n y_test,\n batch_size=self.batch_size,\n verbose=verbose)\n self.val_history = test_results\n return test_results", "def evaluate(self, x_test, y_test, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. 
Run build method or load model before fitting')\n\n test_results = self.model.evaluate(x_test,\n y_test,\n batch_size=self.batch_size,\n verbose=verbose)\n self.val_history = test_results\n return test_results", "def evaluate(self, input_graph, dataloader, postprocess=None,\n metric=None, measurer=None, iteration=-1, tensorboard=False):\n logger.info(\"start to evaluate model....\")\n import tensorflow as tf\n from .tf_utils.graph_rewriter.generic.pre_optimize import PreOptimization\n\n graph = tf.Graph()\n graph_def = PreOptimization(input_graph, self.inputs, \\\n self.outputs).get_optimized_graphdef()\n assert graph_def\n with graph.as_default():\n tf.import_graph_def(graph_def, name='')\n\n outputs = copy.deepcopy(self.outputs)\n if tensorboard:\n from .tf_utils.graph_rewriter.graph_util import GraphAnalyzer\n from tensorflow.python.framework import tensor_util\n\n output_postfix = \"_fp32.output\"\n inspect_node_types = [\"Conv2D\", \"DepthwiseConv2dNative\", \"MaxPool\", \"AvgPool\",\n \"ConcatV2\", \"MatMul\", \"FusedBatchNormV3\", \"BiasAdd\",\n \"Relu\", \"Relu6\", \"Dequantize\"]\n fp32_inspect_node_name = []\n int8_inspect_node_name = []\n q_node_scale = {}\n if self.dump_times == 0:\n temp_dir = \"./runs/eval/baseline\"\n else:\n temp_dir = \"./runs/eval/tune_\" + str(self.dump_times)\n if os.path.isdir(temp_dir):\n import shutil\n shutil.rmtree(temp_dir, ignore_errors=True)\n writer = tf.compat.v1.summary.FileWriter(temp_dir, graph)\n\n cur_graph = GraphAnalyzer()\n cur_graph.graph = graph_def\n cur_graph.parse_graph()\n graph_info = cur_graph.node_name_details\n for node in graph_def.node:\n if node.op in inspect_node_types:\n fp32_inspect_node_name.append(node.name)\n elif node.op.find(\"Requantize\") != -1:\n out_min = -2\n out_max = -1\n if node.op.find(\"Sum\") != -1:\n out_min = -5\n out_max = -4\n q_out_min = graph_info[node.input[out_min]\n ].node.attr[\"value\"].tensor.float_val[0]\n q_out_max = graph_info[node.input[out_max]\n ].node.attr[\"value\"].tensor.float_val[0]\n q_node_scale[node.name] = (node.op, q_out_min, q_out_max)\n int8_inspect_node_name.append(node.name)\n # Inspect weights, bias. 
Need further optimize\n if node.op == \"Const\" and (graph_info[graph_info[node.name].outputs[0]].node.op in\n [\"Conv2D\", \"DepthwiseConv2dNative\", \"MatMul\", \"FusedBatchNormV3\", \"BiasAdd\"]):\n const_value = tensor_util.MakeNdarray(node.attr.get('value').tensor)\n self.log_histogram(writer, node.name, const_value)\n\n outputs.extend(fp32_inspect_node_name)\n if len(int8_inspect_node_name) > 0:\n output_postfix = \"_int8.output\"\n outputs.extend(int8_inspect_node_name)\n input_tensor = [\n self.get_tensor_by_name_with_import(graph, x + \":0\") for x in self.inputs \\\n ]\n output_tensor = [\n self.get_tensor_by_name_with_import(graph, x + \":0\") for x in outputs\n ]\n\n config = tf.compat.v1.ConfigProto()\n config.use_per_session_threads = 1\n # config.intra_op_parallelism_threads = 28\n config.inter_op_parallelism_threads = 1\n sess_graph = tf.compat.v1.Session(graph=graph, config=config)\n\n logger.info(\"Start to evaluate model via tensorflow...\")\n for idx, (inputs, labels) in enumerate(dataloader):\n # dataloader should keep the order and len of inputs same with input_tensor\n if len(input_tensor) == 1:\n feed_dict = {input_tensor[0]: inputs} # get raw tensor using index [0]\n else:\n assert len(input_tensor) == len(inputs), \\\n 'inputs len must equal with input_tensor'\n feed_dict = dict(zip(input_tensor, inputs))\n\n if measurer is not None:\n measurer.start()\n predictions = sess_graph.run(output_tensor, feed_dict) \n measurer.end()\n else:\n predictions = sess_graph.run(output_tensor, feed_dict)\n # Inspect node output, just get 1st iteration output tensors for now\n if idx == 0 and tensorboard:\n for index, node_name in enumerate(outputs):\n tensor = predictions[index]\n if node_name in int8_inspect_node_name:\n tensor = self._dequantize(predictions[index], q_node_scale[node_name])\n self.log_histogram(writer, node_name + output_postfix, tensor, idx)\n writer.close()\n if postprocess is not None:\n predictions, labels = postprocess((predictions, labels))\n if metric is not None:\n metric.update(predictions[0], labels)\n if idx + 1 == iteration:\n break\n acc = metric.result() if metric is not None else 0\n if tensorboard:\n new_dir = temp_dir + \"_acc_\" + str(acc)\n writer.close()\n if os.path.isdir(new_dir):\n import shutil\n shutil.rmtree(new_dir, ignore_errors=True)\n os.rename(temp_dir, new_dir)\n self.dump_times += 1\n sess_graph.close()\n return acc", "def eval(self, dataset=None, criterion=None):\n # Recover the defaults, if missing\n dataset, criterion = self._resolve_defaults(testset=dataset, criterion=criterion)\n # Sample the test batch\n inputs, targets = dataset.sample(self._config)\n # Compute and return the evaluation result\n return criterion(self.run(inputs), targets)", "def test_training(self):\n\t\tpass", "def pipeline_test_data(self):\n if self.linearity:\n Detector1Pipeline.call(self.ramp_file, save_results=True, output_dir=self.output_dir, output_use_model=True,\n steps={'ipc': {'skip': True},\n 'rscd': {'skip': True},\n 'lastframe': {'save_results': True,\n 'output_dir': self.output_dir},\n 'dark_current': {'save_results': True,\n 'output_dir': self.output_dir},\n #'linearity': {'skip': True},\n 'jump': {'save_results': True,\n 'output_dir': self.output_dir}})\n else:\n Detector1Pipeline.call(self.ramp_file, save_results=True, output_dir=self.output_dir, output_use_model=True,\n steps={'ipc': {'skip': True},\n 'rscd': {'skip': True},\n 'lastframe': {'save_results': True,\n 'output_dir': self.output_dir},\n 'dark_current': {'save_results': 
True,\n 'output_dir': self.output_dir},\n 'linearity': {'skip': True},\n 'jump': {'save_results': True,\n 'output_dir': self.output_dir}})\n\n self.pre_dark_file = os.path.join(self.output_dir, 'step_lastframe.fits')\n self.post_dark_file = os.path.join(self.output_dir, 'step_dark_current.fits')\n self.jump_file = os.path.join(self.output_dir, 'step_jump.fits')\n self.rate_file = os.path.join(self.output_dir, 'step_rate.fits')", "def test_best_val(self, te_acc):\n self.test_val = te_acc", "def test(model, data_loader, num_train_batches, epoch, test_mloss, test_rloss, test_acc, directory):\r\n print('===> Evaluate mode')\r\n\r\n # Switch to evaluate mode\r\n model.eval()\r\n\r\n if args.cuda:\r\n # When we wrap a Module in DataParallel for multi-GPUs\r\n model = model.module\r\n\r\n loss = 0\r\n margin_loss = 0\r\n recon_loss = 0\r\n\r\n correct = 0\r\n\r\n num_batches = len(data_loader)\r\n\r\n global_step = epoch * num_train_batches + num_train_batches\r\n\r\n start_time = timer()\r\n\r\n for data, target in data_loader:\r\n with torch.no_grad():\r\n batch_size = data.size(0)\r\n target_indices = target\r\n target_one_hot = utils.one_hot_encode(target_indices, length=args.num_classes)\r\n assert target_one_hot.size() == torch.Size([batch_size, 10])\r\n\r\n target = target_one_hot\r\n\r\n if args.cuda:\r\n data = data.to(args.device)\r\n target = target.to(args.device)\r\n target_indices.to(args.device)\r\n\r\n # Output predictions\r\n output, reconstruction = model(data, target_indices, False) # output from DigitCaps (out_digit_caps)\r\n\r\n # Sum up batch loss\r\n t_loss, m_loss, r_loss = loss_func(\r\n output, target, args.regularization_scale, reconstruction, data, args.device, batch_size)\r\n loss += t_loss.data\r\n margin_loss += m_loss.data\r\n recon_loss += r_loss.data\r\n\r\n # Count number of correct predictions\r\n # v_magnitude shape: [128, 10, 1, 1]\r\n v_magnitude = torch.sqrt((output**2).sum(dim=2, keepdim=True))\r\n # pred shape: [128, 1, 1, 1]\r\n pred = v_magnitude.data.max(1, keepdim=True)[1].cpu()\r\n correct += pred.eq(target_indices.view_as(pred)).sum()\r\n\r\n\r\n # Get the reconstructed images of the last batch\r\n if args.use_reconstruction_loss:\r\n reconstruction = model.decoder(output, target_indices, False)\r\n # Input image size and number of channel.\r\n # By default, for MNIST, the image width and height is 28x28 and 1 channel for black/white.\r\n image_width = args.input_width\r\n image_height = args.input_height\r\n image_channel = args.num_conv_in_channels\r\n recon_img = reconstruction.view(-1, image_channel, image_width, image_height)\r\n assert recon_img.size() == torch.Size([batch_size, image_channel, image_width, image_height])\r\n\r\n # Save the image into file system\r\n utils.save_image(recon_img, directory / ('recons_image_test_{}_{}.png'.format(epoch, global_step)))\r\n utils.save_image(data, directory /\r\n ('original_image_test_{}_{}.png'.format(epoch, global_step)))\r\n\r\n end_time = timer()\r\n\r\n # Log test losses\r\n loss /= num_batches\r\n margin_loss /= num_batches\r\n recon_loss /= num_batches\r\n\r\n # Log test accuracies\r\n num_test_data = len(data_loader.dataset)\r\n accuracy = correct / num_test_data\r\n accuracy_percentage = float(correct) * 100.0 / float(num_test_data)\r\n\r\n test_mloss.write('%.6f \\n' % margin_loss)\r\n test_rloss.write('%.6f \\n' % recon_loss)\r\n test_acc.write('%.4f \\n' % accuracy_percentage)\r\n\r\n # Print test losses and accuracy\r\n print('Test: [Loss: {:.6f},' \\\r\n '\\tMargin loss: {:.6f},' \\\r\n 
'\\tReconstruction loss: {:.6f}]'.format(\r\n loss,\r\n margin_loss,\r\n recon_loss if args.use_reconstruction_loss else 0))\r\n print('Test Accuracy: {}/{} ({:.2f}%)\\n'.format(\r\n correct, num_test_data, accuracy_percentage))\r\n\r\n\r\n global avg_testing_time_per_epoch\r\n avg_testing_time_per_epoch = (\r\n avg_testing_time_per_epoch * (epoch - 1) + end_time - start_time) / epoch\r\n\r\n global best_acc\r\n global best_acc_epoch\r\n if accuracy_percentage > best_acc:\r\n best_acc = accuracy_percentage\r\n best_acc_epoch = epoch\r\n test_loader = data_loader\r\n utils.dump(utils.make_full_checkpoint_obj(locals(), globals()), directory / 'trained_model/FP32_model')", "def train(self):\n p = self._params\n if self.train_data != None:\n tens_to_log = self.params.tens_to_log\n logging_hook = tf.train.LoggingTensorHook(tensors = tens_to_log,\n every_n_iter = p.logging_step,\n )\n t_fn = tf.estimator.inputs.numpy_input_fn(x = {\"x\": self.train_data[\"x\"]},\n y = self.train_data[\"y\"],\n batch_size = p.batch_size,\n num_epochs = None,\n shuffle = True,\n )\n self._model.train(input_fn = t_fn,\n steps = self.params.training_steps,\n hooks = [logging_hook],\n )\n \n if self.eval_data != None:\n e_fn = tf.estimator.inputs.numpy_input_fn(x = {\"x\": self.eval_data[\"x\"]},\n y = self.eval_data[\"y\"],\n num_epochs = 1,\n shuffle = False,\n )\n eval_results = self.model.evaluate(input_fn = e_fn,\n checkpoint_path = self.model_dir,\n )\n print(eval_results)", "def evaluate(self, X_test, y_test):\n y_pred_train = self.pipeline.predict(self.X)\n mse_train = mean_squared_error(self.y, y_pred_train)\n rmse_train = np.sqrt(mse_train)\n \n self.mlflow_log_metric('rmse_train', rmse_train)\n \n y_pred_test = self.pipeline.predict(X_test)\n mse_test = mean_squared_error(y_test, y_pred_test)\n rmse_test = np.sqrt(mse_test)\n self.mlflow_log_metric('rmse_test', rmse_test)\n \n return (round(rmse_train, 3) ,round(rmse_test, 3))", "def test(self,test_fn, eval_metrics):\n # Load gold and predict\n X, Y = self.load_dataset(test_fn)\n y = self.model.predict(X)\n\n # Get most probable predictions and flatten\n Y = RNNOIE_model.consolidate_labels(self.transform_output_probs(Y).flatten())\n y = RNNOIE_model.consolidate_labels(self.transform_output_probs(y).flatten())\n\n # Run evaluation metrics and report\n # TODO: is it possible to compare without the padding?\n ret = []\n for (metric_name, metric_func) in eval_metrics:\n ret.append((metric_name, metric_func(Y, y)))\n # logging.debug(\"calculating {}\".format(ret[-1]))\n\n for (metric_name, metric_val) in ret:\n logging.info(\"{}: {:.4f}\".format(metric_name,\n metric_val))\n return Y, y, ret", "def test(self):\n self.model.eval()\n\n for step, sample in enumerate(self.test_loader):\n x, _, _ = sample\n x = x.to(self.device)\n\n y_pred = self.model.forward(x)\n loss = nn.MSELoss()(y_pred, x)\n self.test_losses.append(loss.item())\n\n if step % (max(8, len(self.test_loader)) // 8) == 0:\n out_img = torch.cat([x[0], torch.clamp(y_pred[0], 0, 1)], dim=2)\n self.sw.add_image(tag=f'sample_{step}', img_tensor=out_img, global_step=self.epoch)\n\n # log average loss on test set\n mean_test_loss = np.mean(self.test_losses)\n self.test_losses = []\n print(f'\\t● AVG Loss on TEST-set: {mean_test_loss:.6f} │ patience: ', end='')\n self.sw.add_scalar(tag='test_loss', scalar_value=mean_test_loss, global_step=self.epoch)\n\n # save best model and update training patience\n if self.best_test_loss is None or mean_test_loss < self.best_test_loss:\n self.best_test_loss = 
mean_test_loss\n self.patience = conf.FX_PATIENCE\n torch.save(self.model.state_dict(), self.log_path / 'best.pth')\n else:\n self.patience = self.patience - 1\n print(f'{self.patience}/{conf.FX_PATIENCE}')\n\n if self.patience == 0:\n self.show_completion_msg()", "def evaluate_model(self, test_data, test_labels,verbose=2):\n test_loss, test_acc = self.model.evaluate(test_data, test_labels, verbose=verbose)\n return test_loss, test_acc", "def run_test():\n # Get the sets of images and labels for training, validation, and\n # test on MNIST.\n train ,validation,test = datasets_mnist.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)\n # Tell TensorFlow that the model will be built into the default Graph.\n with tf.Graph().as_default():\n # Generate placeholders for the images and labels.\n images_placeholder, labels_placeholder, phase_pl = placeholder_inputs(\n FLAGS.batch_size)\n\n # Build a Graph that computes predictions from the inference model.\n logits = mnist.inference(images_placeholder,\n FLAGS.hidden1,\n FLAGS.hidden2, \n phase_pl)\n\n eval_correct = mnist.evaluation(logits, labels_placeholder)\n # Add the variable initializer Op.\n all_variable = tf.global_variables()\n \n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver()\n\n # Create a session for running Ops on the Graph.\n with tf.Session() as sess:\n\n saver.restore(sess, \"log/model.ckpt-1999\")\n for variable in all_variable:\n if \"moving\" in variable.name:\n print(variable.name, variable.eval())\n do_eval(sess,\n eval_correct,\n images_placeholder,\n labels_placeholder,\n phase_pl,\n test)", "def testing_call(self, inputs, y, branch):\n self.eval()\n with torch.no_grad():\n out = self.forward(inputs, branch=branch)\n\n # Loss computation\n loss_obj = self.branch_losses[branch]\n loss = loss_obj(out, y)\n return out, loss.item()", "def eval_perf_train(model, X_train=None, y_train=None):\n\n # if X_train != None and y_train != None:\n\n y_hat_train = model.predict(X_train)\n \n train_mae = metrics.mean_absolute_error(y_train, y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f'Train Mean Absolute Error: {train_mae:,.2f}')\n print(f'Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n # if X_test != None and y_test != None:\n\n # y_hat_test = model.predict(X_test)\n\n # test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n # test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n # test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n # test_r = metrics.r2_score(y_test, y_hat_test)\n\n # print('Evaluating Performance on Testing Data:\\n')\n # print(f'Test Mean Absolute Error: {test_mae:,.2f}')\n # print(f'Test Mean Squared Error: {test_mse:,.2f}\\n')\n # print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n # print(f'Test R-Square Value: {round(test_r,2)}')", "def evaluate(network, loss_function, softmax_function, test_loader, test_set_size):\n running_loss = 0.0\n confusion_matrix = { # Of shape [predicted value][real value]\n 0: {0: 0, 1: 0, 2: 0},\n 1: {0: 0, 1: 0, 2: 0},\n 2: {0: 0, 1: 0, 2: 0},\n }\n batch_size = -1\n network.eval()\n with torch.no_grad():\n correct = 0\n for graph_batch, label_batch in test_loader:\n if batch_size == 
-1:\n batch_size = label_batch.size(0)\n logits = network(graph_batch, graph_batch.ndata['n_feat'], graph_batch.edata['e_feat'], 0, 0)\n running_loss += loss_function(logits, label_batch).detach().item()\n predicted_classes = torch.argmax(logits, dim=1).detach()\n correct += (predicted_classes == label_batch).sum().item()\n for predicted_class, label in zip(predicted_classes, label_batch):\n confusion_matrix[predicted_class.item()][label.item()] += 1\n\n if batch_size <= 0:\n print(\"Error : batch size is {}\".format(batch_size))\n exit(1)\n\n return correct / test_set_size, running_loss / len(test_loader), confusion_matrix", "def test(self, test_fn, eval_metrics):\n # Load gold and predict\n X, Y = self.load_dataset(test_fn)\n y = self.model.predict(X)\n\n # Get most probable predictions and flatten\n Y = RNN_model.consolidate_labels(self.transform_output_probs(Y).flatten())\n y = RNN_model.consolidate_labels(self.transform_output_probs(y).flatten())\n\n # Run evaluation metrics and report\n # TODO: is it possible to compare without the padding?\n ret = []\n for (metric_name, metric_func) in eval_metrics:\n ret.append((metric_name, metric_func(Y, y)))\n logging.debug(\"calculating {}\".format(ret[-1]))\n\n for (metric_name, metric_val) in ret:\n logging.info(\"{}: {:.4f}\".format(metric_name,\n metric_val))\n return Y, y, ret", "def evaluate(model, g, val_nid, device):\n model.eval()\n nfeat = g.ndata['features']\n labels = g.ndata['labels']\n with th.no_grad():\n pred = model.module.inference(g, nfeat, device, args.batch_size, args.num_workers)\n model.train()\n test_acc = Accuracy()\n return test_acc(th.softmax(pred[val_nid], -1), labels[val_nid].to(pred.device))", "def make_eval_step(self):\n def eval_step(model, example):\n with flax.deprecated.nn.stateful() as state:\n logits = model(example, train=False)\n if self.info.supervised_keys[-1] == 'error_type':\n targets = example['error_type'][:, None]\n else:\n targets = example['target_output']\n state = {k: v['tag'] for k, v in state.as_dict().items()}\n return self.compute_metrics(logits, targets, None), logits, state\n\n return eval_step", "def do_eval(sess,model,valid,batch_size):\n valid_X,valid_y,valid_p=valid\n number_examples=valid_X.shape[0]\n if number_examples>10000:\n number_examples=validation_size\n print(\"do_eval.valid.number_examples:\",number_examples)\n if number_examples>validation_size: valid_X,valid_y,valid_p=valid_X[0:validation_size],valid_y[0:validation_size],valid_p[0:validation_size]\n eval_loss,eval_counter,eval_acc=0.0,0,0.0\n for start,end in zip(range(0,number_examples,batch_size),range(batch_size,number_examples,batch_size)):\n feed_dict = {model.x_mask_lm: valid_X[start:end],model.y_mask_lm: valid_y[start:end],model.p_mask_lm:valid_p[start:end],\n model.dropout_keep_prob: 1.0} # FLAGS.dropout_keep_prob\n curr_eval_loss, logits_lm, accuracy_lm= sess.run([model.loss_val_lm,model.logits_lm,model.accuracy_lm],feed_dict) # logits:[batch_size,label_size]\n eval_loss=eval_loss+curr_eval_loss\n eval_acc=eval_acc+accuracy_lm\n eval_counter=eval_counter+1\n return eval_loss/float(eval_counter+small_value), eval_acc/float(eval_counter+small_value)", "def test(self, dataset) -> None:\n raise NotImplementedError()", "def test(self, test=False): \n if test == True:\n if os.path.exists(self.student_save_path):\n checkpoint = torch.load(self.student_save_path, map_location=self.device)\n else:\n raise ValueError('No file with the pretrained model selected')\n\n self.student_model.load_state_dict(checkpoint)\n 
self.student_model.eval()\n\n running_acc = 0\n with torch.no_grad():\n for data, label in self.testloader:\n data, label = data.to(self.device), label.to(self.device)\n\n student_logits, *student_activations = self.student_model(data)\n\n running_acc += utils.accuracy(student_logits.data, label)\n\n print(f\"Test accuracy: {running_acc / len(self.testloader)}\")\n return running_acc / len(self.testloader)", "def evaluate(self, output_dir, test_data, device, verbose_logging=False):\r\n tokenizer = self.tokenizer\r\n # device = torch.device(\"cuda:0\")\r\n model = self.model\r\n model.to(device)\r\n args = self.args\r\n\r\n # # reassgin unique_id for features to keep order for federated learning situation\r\n # unique_id = 1000000000\r\n # for feature in self.test_dl.features:\r\n # feature.unique_id = unique_id\r\n # unique_id += 1\r\n\r\n examples = test_data.examples\r\n features = test_data.features\r\n\r\n eval_loss = 0.0\r\n nb_eval_steps = 0\r\n model.eval()\r\n\r\n # if args.n_gpu > 1:\r\n # model = torch.nn.DataParallel(model)\r\n\r\n if self.args.fp16:\r\n from torch.cuda import amp\r\n\r\n all_results = []\r\n for batch in tqdm(test_data, disable=args.silent, desc=\"Running Evaluation\"):\r\n batch = tuple(t.to(device) for t in batch)\r\n\r\n with torch.no_grad():\r\n inputs = {\r\n \"input_ids\": batch[1],\r\n \"attention_mask\": batch[2],\r\n \"token_type_ids\": batch[3],\r\n }\r\n\r\n if self.args.model_type in [\r\n \"xlm\",\r\n \"roberta\",\r\n \"distilbert\",\r\n \"camembert\",\r\n \"electra\",\r\n \"xlmroberta\",\r\n \"bart\",\r\n ]:\r\n del inputs[\"token_type_ids\"]\r\n\r\n example_indices = batch[4]\r\n\r\n if args.model_type in [\"xlnet\", \"xlm\"]:\r\n inputs.update({\"cls_index\": batch[5], \"p_mask\": batch[6]})\r\n\r\n if self.args.fp16:\r\n with amp.autocast():\r\n outputs = model(**inputs)\r\n eval_loss += outputs[0].mean().item()\r\n else:\r\n outputs = model(**inputs)\r\n eval_loss += outputs[0].mean().item()\r\n begin_idx = len(all_results)\r\n for i, _ in enumerate(example_indices):\r\n eval_feature = features[begin_idx + i]\r\n unique_id = int(eval_feature.unique_id)\r\n if args.model_type in [\"xlnet\", \"xlm\"]:\r\n # XLNet uses a more complex post-processing procedure\r\n result = RawResultExtended(\r\n unique_id=unique_id,\r\n start_top_log_probs=to_list(outputs[0][i]),\r\n start_top_index=to_list(outputs[1][i]),\r\n end_top_log_probs=to_list(outputs[2][i]),\r\n end_top_index=to_list(outputs[3][i]),\r\n cls_logits=to_list(outputs[4][i]),\r\n )\r\n else:\r\n result = RawResult(\r\n unique_id=unique_id, start_logits=to_list(outputs[0][i]), end_logits=to_list(outputs[1][i]),\r\n )\r\n all_results.append(result)\r\n\r\n nb_eval_steps += 1\r\n\r\n eval_loss = eval_loss / nb_eval_steps\r\n\r\n prefix = \"test\"\r\n os.makedirs(output_dir, exist_ok=True)\r\n\r\n output_prediction_file = os.path.join(output_dir, \"predictions_{}.json\".format(prefix))\r\n output_nbest_file = os.path.join(output_dir, \"nbest_predictions_{}.json\".format(prefix))\r\n output_null_log_odds_file = os.path.join(output_dir, \"null_odds_{}.json\".format(prefix))\r\n\r\n if args.model_type in [\"xlnet\", \"xlm\"]:\r\n # XLNet uses a more complex post-processing procedure\r\n (all_predictions, all_nbest_json, scores_diff_json, out_eval,) = write_predictions_extended(\r\n examples,\r\n features,\r\n all_results,\r\n args.n_best_size,\r\n args.max_answer_length,\r\n output_prediction_file,\r\n output_nbest_file,\r\n output_null_log_odds_file,\r\n None,\r\n model.config.start_n_top,\r\n 
model.config.end_n_top,\r\n True,\r\n tokenizer,\r\n verbose_logging,\r\n )\r\n else:\r\n all_predictions, all_nbest_json, scores_diff_json = write_predictions(\r\n examples,\r\n features,\r\n all_results,\r\n args.n_best_size,\r\n args.max_answer_length,\r\n args.do_lower_case,\r\n output_prediction_file,\r\n output_nbest_file,\r\n output_null_log_odds_file,\r\n verbose_logging,\r\n True,\r\n args.null_score_diff_threshold,\r\n )\r\n\r\n return all_predictions, all_nbest_json, scores_diff_json, eval_loss", "def eval_step(total_loss):\n inputs = dequeue_fn()\n features, labels = inputs.features_and_labels()\n\n tpu_estimator_spec = self._call_model_fn(features, labels)\n if not isinstance(tpu_estimator_spec, TPUEstimatorSpec):\n raise RuntimeError(\n 'estimator_spec used by TPU evaluation must have type'\n '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))\n\n loss = tpu_estimator_spec.loss\n captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)\n to_record = {}\n to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics\n if tpu_estimator_spec.host_call is not None:\n # We assume that evaluate won't update global step, so we don't wrap\n # this host_call.\n to_record['host_call'] = tpu_estimator_spec.host_call\n host_calls.record(to_record)\n\n with ops.control_dependencies(host_calls.create_enqueue_op()):\n return math_ops.add(total_loss, loss)", "def comprehensiveEvaluation(directory = defaultDirectory(), \n pruneGlobal = True, numLayers = 2, \n numNodesPerLayer = 200, randSeed = 1,\n trainPer = .6, valPer = .25, testPer = .15,\n totalPer = 1, batchSize = 64,\n numEpochs = 1000, learningRate = 0.001, \n l2Reg = 0.0001, modelFile = None,\n useGRU = False,\n dropoutI = 0.2, dropoutH=0.2, \n trainMode = 'continue', randSeed2 = None):\n trainPer, valPer, testPer, totalPer = normalizePercentages(trainPer, valPer, testPer, totalPer)\n if modelFile is None:\n modelFile = nameModelFile('', useGRU, numLayers, numNodesPerLayer, randSeed,\n trainPer, valPer, testPer, totalPer, dropoutI, dropoutH, l2Reg)\n \n np.random.seed(randSeed) #control permutation of data\n # prune global coordinate data?\n if pruneGlobal:\n pruneRange = range(0, 18)\n else:\n pruneRange = None\n \n struct = loadDataset(directory, pruneRange, trainPer, valPer, \n testPer, totalPer, '.left', True)\n \n if randSeed2 is not None: #control randomization of training\n np.random.seed(randSeed2)\n trainGestureRNN(numLayers=numLayers, numNodesPerLayer=numNodesPerLayer,\n useGRU=useGRU, batchSize=batchSize, \n numEpochs = numEpochs, learningRate=learningRate,\n l1Reg=0, l2Reg = l2Reg, dropoutI=dropoutI, dropoutH=dropoutH,\n sequences = struct[0], classes = struct[1],\n trainRange = struct[2], valRange = struct[3],\n testRange = struct[4], numClasses = struct[5],\n numObservations = struct[6], numSequences = struct[7],\n numFeatures = struct[8],\n modelFile=modelFile, \n trainMode=trainMode,\n callbacks = [EarlyStopping(patience=20)])", "def val_func(self, data, label):\r\n self.net.eval()\r\n\r\n with torch.no_grad():\r\n outputs, losses = self.forward(data, label)\r\n\r\n return outputs, losses", "def evaluate(self,**kwargs):\n # setup model\n self.optimizer = SGD(lr = 0,momentum=0,decay = 0)\n self.createModel()\n self.setGenerators()\n self.printParameters()\n output = {}\n\n if kwargs['validationOnly'] != None:\n if kwargs['validationOnly'] == True:\n valOnly = True\n else:\n valOnly = False\n else:\n valOnly = False\n\n if valOnly == False:\n trainOutput = self.model.evaluate_generator(\n generator = 
self.trainGen,\n steps=self.steps_per_epoch,\n use_multiprocessing=True,\n verbose=1\n )\n output['loss'] = trainOutput[0]\n for i in range(len(self.metricsAsString)):\n output[self.metricsAsString[i]] = trainOutput[i+1]\n\n print(\"loss : \" + str(output['loss']))\n for i in range(len(self.metricsAsString)):\n tmp = self.metricsAsString[i] \n print(tmp + \" : \" + str(output[tmp])) \n\n validationOutput = self.model.evaluate_generator(\n generator = self.validateGen,\n steps=self.validation_steps, \n use_multiprocessing=True, \n verbose=1)\n \n output['val_loss'] = validationOutput[0]\n for i in range(len(self.metricsAsString)):\n output[\"val_\" + self.metricsAsString[i]] = validationOutput[i+1]\n \n\n print(\"val_loss : \" + str(output['val_loss']))\n for i in range(len(self.metricsAsString)):\n tmp = \"val_\" + self.metricsAsString[i] \n print(tmp + \" : \" + str(output[tmp]))", "def evaluate(self, neural_network: NeuralNetwork) -> np.ndarray:\n if self.i - 1 >= self.k:\n logging.error(\"No more training iterations!!\")\n return np.array([])\n logging.info(\"Iteration {}/{}\".format(self.i, self.k))\n _, test = self.indexes[self.i - 1]\n return neural_network.feed_forward(\n self.data.take(test, axis=-1)\n )", "def _evaluate(self, y_true, y_pred):\n pass", "def evaluate_test(model, history, class_labels, train_X, test_X, train_y, test_y):\n train_loss, train_acc = model.evaluate(train_X, train_y, verbose=0)\n test_loss, test_acc = model.evaluate(test_X, test_y, verbose=0)\n print('Accuracy \\n Train: %.3f, Test: %.3f' % (train_acc, test_acc))\n print('Loss \\n Train: %.3f, Test: %.3f \\n' % (train_loss, test_loss))\n # plot loss during training\n plt.subplots_adjust(hspace = .5, wspace = 0.5)\n plt.subplot(211)\n plt.title('Loss', weight='bold')\n plt.plot(history.history['loss'], label='train')\n plt.plot(history.history['val_loss'], label='val')\n plt.legend()\n # plot accuracy during training\n plt.subplot(212)\n plt.title('Accuracy', weight='bold')\n plt.plot(history.history['acc'], label='train')\n plt.plot(history.history['val_acc'], label='val')\n plt.legend()\n plt.show()\n print('\\n')\n # predict probabilities for test set\n yhat_probs = model.predict(test_X, verbose=0)\n # predict classes for test set\n yhat_classes = model.predict_classes(test_X, verbose=0)\n # reduce to 1d array\n yhat_probs = yhat_probs[:, 0]\n yhat_classes = yhat_classes[:, 0]\n # calculate metrics\n report = metrics.classification_report(test_y, yhat_classes, target_names=class_labels)\n confusion_matrix = metrics.confusion_matrix(test_y, yhat_classes)\n plot_confusion_matrix(confusion_matrix, class_labels)\n print('\\n')\n return report", "def train_and_evaluate(model, train_data, val_data, optimizer, scheduler, params, model_dir, restore_dir=None):\n # reload weights from restore_dir if specified\n if restore_dir is not None:\n model = BertForSequenceTagging.from_pretrained(tagger_model_dir)\n \n best_val_f1 = 0.0\n patience_counter = 0\n\n for epoch in range(1, params.epoch_num + 1):\n # Run one epoch\n logging.info(\"Epoch {}/{}\".format(epoch, params.epoch_num))\n\n # Compute number of batches in one epoch\n params.train_steps = params.train_size // params.batch_size\n params.val_steps = params.val_size // params.batch_size\n\n # data iterator for training\n train_data_iterator = data_loader.data_iterator(train_data, shuffle=True)\n\n # Train for one epoch on training set\n train_epoch(model, train_data_iterator, optimizer, scheduler, params)\n\n # data iterator for evaluation\n # 
train_data_iterator = data_loader.data_iterator(train_data, shuffle=False)\n val_data_iterator = data_loader.data_iterator(val_data, shuffle=False)\n\n # Evaluate for one epoch on training set and validation set\n # params.eval_steps = params.train_steps\n # train_metrics = evaluate(model, train_data_iterator, params, mark='Train') # callback train f1\n params.eval_steps = params.val_steps\n val_metrics = evaluate(model, val_data_iterator, params, mark='Val')\n \n val_f1 = val_metrics['f1']\n improve_f1 = val_f1 - best_val_f1\n if improve_f1 > 1e-5: \n logging.info(\"- Found new best F1\")\n best_val_f1 = val_f1\n model.save_pretrained(model_dir)\n if improve_f1 < params.patience:\n patience_counter += 1\n else:\n patience_counter = 0\n else:\n patience_counter += 1\n\n # Early stopping and logging best f1\n if (patience_counter >= params.patience_num and epoch > params.min_epoch_num) or epoch == params.epoch_num:\n logging.info(\"Best val f1: {:05.2f}\".format(best_val_f1))\n break", "def evaluate(self):\n pass", "def evaluate(self):\n pass" ]
[ "0.70532185", "0.69431454", "0.6933192", "0.6898252", "0.6769754", "0.6761503", "0.6628612", "0.65984225", "0.6573417", "0.6542276", "0.65251", "0.65153444", "0.6453402", "0.6376132", "0.6350992", "0.6338785", "0.6319733", "0.6301764", "0.62932855", "0.6274323", "0.62724113", "0.6261761", "0.62548596", "0.6247638", "0.62239754", "0.62223434", "0.62210804", "0.6207677", "0.6192141", "0.6177173", "0.6174179", "0.6173052", "0.61684567", "0.6168313", "0.61679703", "0.61656153", "0.6161233", "0.61604995", "0.61596763", "0.61558217", "0.61449385", "0.61419165", "0.61385274", "0.6126619", "0.61139894", "0.60873234", "0.60862666", "0.60727865", "0.6070855", "0.60672283", "0.60566413", "0.6018994", "0.60064644", "0.600358", "0.600254", "0.59990495", "0.5997426", "0.5994504", "0.5983075", "0.5982727", "0.5979085", "0.59745646", "0.59586805", "0.5958036", "0.5957934", "0.5954995", "0.5954877", "0.5954877", "0.5954877", "0.5951346", "0.59502715", "0.59470767", "0.59413767", "0.5940535", "0.5935082", "0.59240013", "0.5922641", "0.59174824", "0.5908576", "0.59058106", "0.5904115", "0.5903918", "0.590023", "0.58881885", "0.5879541", "0.5876984", "0.58744264", "0.5870205", "0.5866115", "0.5862158", "0.58508265", "0.5849594", "0.5846974", "0.5840651", "0.58406377", "0.58393365", "0.58375674", "0.58367616", "0.58337253", "0.5829678", "0.5829678" ]
0.0
-1
Reset the process of training, which includes the loss meter reset, epoch reset and model's weights reset.
def reset_train(self):
    self.model.apply(self._reset_weights)
    self.epoch_loss.reset()
    self.epoch = 0
    del self.batch_process
    self.batch_process = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\n self.train_loss.reset_states()\n self.train_accuracy.reset_states()\n self.val_loss.reset_states()\n self.val_accuracy.reset_states()\n self.train_mIoU.reset_states()\n self.val_mIoU.reset_states()", "def train_loop_begin(self):\r\n for _, train_loss_metric in self.training_losses.items():\r\n train_loss_metric.reset_states()\r\n\r\n for _, metrics in self.training_metrics.items():\r\n for metric in metrics:\r\n metric.reset_states()", "def reset(self):\n self.epochs = 0\n # Shuffle the training data\n perm = np.arange(self.num_train)\n np.random.shuffle(perm)\n assert self.num_train == self.train_images.shape[\n 0], 'Error incorrect shuffling mask'\n self.train_images = self.train_images[perm]\n self.train_labels = self.train_labels[perm]\n self.curr_train_index = 0", "def reset(self):\n checkpoint = torch.load(\n 'model_lr_finder.pth.tar',\n map_location=self.device)\n self.model.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n self.model.to(self.device)\n self.model.train()", "def reset(self):\n self.pred_classes.clear()\n self.gold_classes.clear()\n self.pred_probas.clear()\n self.gold_probas.clear()\n self.loss = 0\n self.nb_batches = 0\n self.prec_rec_f1 = None\n self.acc = None\n self.mcc = None", "def reset(self):\n self.loss = []\n self.funcargs = []\n self.nSteps = 0 \n self.converged = False", "def reset(self):\n self.loss = 0\n self.cnt = 0", "def reset(self):\n self.acc_loss = 0\n self.norm_term = 0", "def reset_training(self):\n self.policy_optim = Adam(self.policy.parameters(), lr=self.lr)\n self.q_optim = Adam(self.q_net.parameters(), lr=self.lr)\n\n self.alpha_optim = Adam([self.log_alpha], lr=1e-2)", "def reset(self) -> None:\n self.best = self.mode_worse\n self.cooldown_counter = 0\n self.num_bad_epochs = 0", "def reset(self) -> None:\n self.is_run = False\n self.env_step = 0\n if self.resume_from_log:\n self.start_epoch, self.env_step, self.gradient_step = \\\n self.logger.restore_data()\n\n self.last_rew, self.last_len = 0.0, 0\n self.start_time = time.time()\n if self.train_collector is not None:\n self.train_collector.reset_stat()\n\n if self.train_collector.policy != self.policy:\n self.test_in_train = False\n elif self.test_collector is None:\n self.test_in_train = False\n\n if self.test_collector is not None:\n assert self.episode_per_test is not None\n assert not isinstance(self.test_collector, AsyncCollector) # Issue 700\n self.test_collector.reset_stat()\n test_result = test_episode(\n self.policy, self.test_collector, self.test_fn, self.start_epoch,\n self.episode_per_test, self.logger, self.env_step, self.reward_metric\n )\n self.best_epoch = self.start_epoch\n self.best_reward, self.best_reward_std = \\\n test_result[\"rew\"], test_result[\"rew_std\"]\n if self.save_best_fn:\n self.save_best_fn(self.policy)\n\n self.epoch = self.start_epoch\n self.stop_fn_flag = False\n self.iter_num = 0", "def reset(self):\n self.reset_image_estimate()\n self.init_m_aux()\n self.reset_hessian_and_bias()\n self.reset_adadelta_variables()", "def reset_training_data(self):\n logger.info(\"resetting training data\")\n if self.shuffle:\n random.shuffle(self.tweets)\n self.batch_generator = self.get_batch()", "def reset(self):\n \n s = self\n s.step_counter = 0\n \n # TODO: initialize first layer activations here, and not everywhere else\n # self.model.initialize_local_vars()\n # self.model.initialize_global_vars()\n\n ops = []\n\n for var in self.model.trainable_vars:\n if self.needs_correction(var):\n 
A_svd = s[var].A.svd\n B2_svd = s[var].B2.svd \n ops.extend(A_svd.init_ops)\n ops.extend(B2_svd.init_ops)\n ops.append(s[var].A.cov.initializer)\n ops.append(s[var].B2.cov.initializer)\n\n # in new TensorFlow this breaks, probably because of\n # https://github.com/tensorflow/tensorflow/commit/07adc2ea910de715d31e16a019fcbcccb575e931\n # sometimes get \"need to feed\" placeholder error\n # sometimes do not get this error, but spend two minutes inside\n # _build_initializer_expr\n s.run(ops)", "def reset(self):\n for i in range(0, len(self.current_state)):\n self.current_state[i] = 0\n\n for i in range(0, len(self.weights)):\n self.weights[i] = 0", "def _reset(self):\n self.loss_history = []\n self.optim_configs = {}\n for p in self.model.params:\n d = {k: v for k, v in self.optim_config.items()}\n self.optim_configs[p] = d", "def reset(self):\n\n self.scaler = None\n self.isFitted = False\n self.__create_scaler()", "def reset(self, runs):\n\n self.answer_wrong = 0\n self.answer_right = 0\n self.train_new(runs)", "def shutdown_training(self):\n\n self._train_data_set = None\n self._test_data_set = None", "def reset(self):\n self.clean_cache_upstream()\n self.set_mode_train()\n for step_obj in self.all_upstream_steps.values():\n step_obj.is_fittable = DEFAULT_TRAINING_SETUP['is_fittable']\n step_obj.force_fitting = DEFAULT_TRAINING_SETUP['force_fitting']\n step_obj.persist_output = DEFAULT_TRAINING_SETUP['persist_output']\n step_obj.cache_output = DEFAULT_TRAINING_SETUP['cache_output']\n step_obj.load_persisted_output = DEFAULT_TRAINING_SETUP['load_persisted_output']\n logger.info('Step {}, reset all upstream Steps to default training parameters, '\n 'including this Step'.format(self.name))\n return self", "def reset_train_results(self):\n self.train_loss_results = {}\n self.train_accuracy_results = {}\n self.train_pred_results = {}", "def _reset(self):\n self.use_gpu = torch.cuda.is_available()\n if self.use_gpu:\n self.model = self.model.cuda()\n self.hist_train_psnr = []\n self.hist_val_psnr = []\n self.hist_loss = []", "def reset(self):\n self.pred = None\n self.target = None", "def reset(self):\n self.pred = None\n self.target = None", "def _reset(self):\n self._model._reset()\n super(RDPAnalyzer, self)._reset()", "def reset(self):\n # Attempt to reset data loader\n self.data_loader_iter = iter(self.data_loader)\n self.num_batches = 0\n\n # Make sure calibrator will check the cache again when reset.\n self.cache_contents = None", "def reset(self):\n self.epochs = 0\n self.num_classes = 2 # Minimum of 2 classes\n self._random_state = check_random_state(self.random_state)\n if self.base_estimators:\n self.experts = [\n self.WeightedExpert(\n cp.deepcopy(be), 1, self.labels)\n for be in self.base_estimators\n ]\n else:\n self.experts = [\n self._construct_new_expert()\n ]", "def _reset(self):\n np.random.shuffle(self.id)\n self.episode_step = 0 # Reset episode step counter at the end of every episode\n self._state = self.X_train[self.id[self.episode_step]]\n self._episode_ended = False\n\n return ts.restart(self._state)", "def reset(self):\n inv_perm = np.argsort(self._current_order)\n self._current_order = self._current_order[inv_perm]\n self.inputs = self.inputs[inv_perm]\n self.targets = self.targets[inv_perm]\n self.target_ids = self.target_ids[inv_perm]\n self.new_epoch()", "def reset_states(self):\n self.mean_makespan_baseline.assign(0)\n self.mean_makespan_train.assign(0)\n self.step.assign(0)", "def reset(self):\n self.ai.reset()", "def reset(self) -> None:\n 
self.precision.reset()\n self.recall.reset()", "def reset(self):\n # reset rewards\n self.reward = torch.zeros(self.batch_size, self.game_num_agents)\n\n self.has_been = torch.zeros(self.batch_size, self.nsteps, self.game_num_agents)\n\n self.terminal = torch.zeros(self.batch_size)\n\n self.step_counter = 1\n\n self.active", "def reset_parameters(self):\n self.lstm.reset_parameters()", "def reset(self):\n self.best_model = None\n self.best_res = -1", "def reset(self):\n # from pathlib import Path\n # import pickle as pkl\n # path_traj = Path.home() / 'TmrlData' / 'reward' / 'traj.pkl'\n # with open(path_traj, 'wb') as file_traj:\n # pkl.dump(self.traj, file_traj)\n\n self.cur_idx = 0\n self.step_counter = 0\n self.failure_counter = 0\n\n # self.traj = []", "def reset(self):\n inv_perm = np.argsort(self._current_order)\n self._current_order = self._current_order[inv_perm]\n self.inputs = self.inputs[inv_perm]\n self.targets = self.targets[inv_perm]\n self.new_epoch()", "def reset_metrics(self):\n self.metrics['loss'] = 0.0\n self.metrics['num_tokens'] = 0\n self.metrics['correct_tokens'] = 0\n self.metrics['correct_pred'] = 0\n self.metrics['pred_count'] = 0", "def reset(self):\n self.error_p = 0.0\n self.error_i = 0.0\n self.error_d = 0.0\n self.errors = [ 0.0 ] * self.samples\n if callable(self.debug_callback):\n self.debug_callback(\"reset\")", "def reset_nn(self): # Clear current network\n self.weights = np.zeros((p.num_rovers, self.n_weights))\n self.in_layer = np.zeros((p.num_rovers, self.n_inputs))\n self.hid_layer = np.zeros((p.num_rovers, self.n_nodes))\n self.out_layer = np.zeros((p.num_rovers, self.n_outputs))", "def reset(self):\n self.reset_count += 1\n self._init_data()", "def _reset(self):\n if self.mode not in ['auto', 'min', 'max']:\n warnings.warn(\n 'Learning rate reduction mode %s is unknown, '\n 'fallback to auto mode.' 
% self.mode\n )\n self.mode = 'auto'\n if self.mode == 'min' or (\n self.mode == 'auto' and 'acc' not in self.monitor\n ):\n self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)\n self.best = np.Inf\n else:\n self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)\n self.best = -np.Inf\n self.cooldown_counter = 0\n self.wait = 0", "def reset(self):\n self._weights.clear()", "def reset(self):\n self._reset_next_step = False\n self.step_count = 0\n \n self._state = self.state_initializer()\n self._meta_state = self._meta_state_initializer()\n self.task.reset(self._state, self._meta_state)\n self.physics.reset(self._state)\n self.action_space.reset(self._state)\n for rule in self.game_rules:\n rule.reset(self._state, self._meta_state)\n rule.step(self._state, self._meta_state)\n \n return dm_env.restart(self.observation())", "def reset(self):\n self.best_res = -1\n self.best_epoch = self.ignore_before", "def reset(self, model):\n self.reset_strategy(model)", "def reset(self):\n self.velocity_controller.reset()\n self.yaw_filter.reset()", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()", "def reset(self):\n\n def reset_function(module):\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n m.reset_parameters()\n\n self.apply(reset_function)", "def reset(self):\n super().reset()\n self.m_n = 1\n self.m_num_errors = 0\n self.m_d = 0\n self.m_lastd = 0\n self.m_mean = 0.0\n self.m_std_temp = 0.0\n self.m_m2s_max = 0.0\n self.estimation = 0.0", "def reset(self):\n self.data = {}\n self.pf.reset()\n\n self.tc.reset()\n # Reset the neuron grid\n (self.n_n, XE, YE, IE, _, _) = self.init_pix_rf_centers(\n self.l_n, self.l_i, self.ds, self.de, mode=self.neuron_layout,\n drop_prob=self.drop_prob\n )\n self.tc.t_XE.set_value(XE)\n self.tc.t_YE.set_value(YE)\n self.tc.t_IE.set_value(IE)\n self.pf = self.init_particle_filter(self.motion_prior, self.n_p)", "def reset(self):\n logging.info(\"Resetting DINTModel.\")\n if self.classifier:\n self.server.remove_model(self.classifier)\n # for ds in self.server.datasets:\n # self.server.remove_dataset(ds)\n # TODO: remove datasets?\n self.classifier = None", "def reset(self):\n\n self.rotation = 0\n self.iteration = 0\n self.predictions = []\n self.prediction = 0\n self.current_position = 0\n self.rotation_list = [0]\n self.prediction = 0\n self.initial_adjust = False", "def reset(self):\r\n assert self.testing is True\r\n\r\n # Clear queues\r\n self._coordinator.request_stop()\r\n with self._fread_queue.mutex: # Unblock any self._fread_queue.get calls\r\n self._fread_queue.queue.clear()\r\n for _ in range(2*self.num_threads):\r\n self._fread_queue.put(None)\r\n while True: # Unblock any enqueue requests\r\n preprocess_queue_size = self._tensorflow_session.run(self._preprocess_queue_size_op)\r\n if preprocess_queue_size == 0:\r\n break\r\n self._tensorflow_session.run(self._preprocess_queue_clear_op)\r\n time.sleep(0.1)\r\n while True: # Unblock any self._fread_queue.put calls\r\n try:\r\n self._fread_queue.get_nowait()\r\n except queue.Empty:\r\n break\r\n time.sleep(0.1)\r\n self._coordinator.join(self.all_threads, stop_grace_period_secs=5)\r\n\r\n # Restart threads\r\n self._coordinator.clear_stop()\r\n self.create_and_start_threads()", "def reset_train_pointer(self):\n self.train_pointer = 0\n\n if self.shuffle:\n self.shuffle_data()", "def reset(self):\n 
self.mode = 0\n self.graphs = [[], [], []]\n self.coefficients = []\n self.sample = []", "def reset_states(self):\n K.batch_set_value([(v, 0) for v in self.variables])", "def reset(self):\n for layer in self.network:\n layer.clean()", "def _reset(self, new_base_lr=None, new_max_lr=None, new_step_size=None):\n if new_base_lr is not None:\n self.base_lr = new_base_lr\n if new_max_lr is not None:\n self.max_lr = new_max_lr\n if new_step_size is not None:\n self.step_size = new_step_size\n self.clr_iterations = 0.0", "def reset_states(self):\n self.model.reset_states()", "def _reset(self):\n if self.mode not in ['auto', 'min', 'max']:\n logging.warning('Learning Rate Plateau Reducing mode %s is unknown, '\n 'fallback to auto mode.', self.mode)\n self.mode = 'auto'\n if (self.mode == 'min' or\n (self.mode == 'auto' and 'acc' not in self.monitor)):\n self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)\n self.best = np.Inf\n else:\n self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)\n self.best = -np.Inf\n self.cooldown_counter = 0\n self.wait = 0", "def _reset(self):\n if self.mode not in ['auto', 'min', 'max']:\n warnings.warn('Learning Rate Plateau Reducing mode %s is unknown, '\n 'fallback to auto mode.' % (self.mode),\n RuntimeWarning)\n self.mode = 'auto'\n if (self.mode == 'min' or\n (self.mode == 'auto' and 'acc' not in self.monitor)):\n self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)\n self.best = np.Inf\n else:\n self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)\n self.best = -np.Inf\n self.cooldown_counter = 0\n self.wait = 0", "def reset(self):\n logger.info(\"resetting worker #%i\", self.myid)\n self.model.projection = self.model.projection.empty_like()\n self.finished = False", "def reset(self):\n self.steps = 0\n self.state = 0\n self.trajectory = []", "def reset(self):\n weight = self.module.weight.data\n self.sensitivity_in = torch.zeros(weight.shape[1]).to(weight.device)\n self._features = torch.Tensor()\n self._current_batch = 1", "def reset_all(self):\n self._stepsize = _stepsize\n self.reset_f()\n self.reset_s()\n self.reset_u()", "def reset_model(self):\n raise NotImplementedError", "def reset_model(self):\n raise NotImplementedError", "def reset_model(self):\n raise NotImplementedError", "def reset(self):\n self.sum_metric = 0.\n self.num_inst = 0.\n self.metrics.reset_stats()", "def reset(self):\n self.resetPos()\n self.vx, self.vy = 0, 0\n self.accel, self.dangle = 0, 0\n self.crashed = False\n self.timeDriving, self.score, self.checkpoint, self.laps = 0, 0, 0, 0\n self.targetCheckpointPos = self.maze.checkpoints[0].getMidInt()\n self.inputColour = [sensor_colours[0] for i in range(self.dimensions[0])]\n self.scan = np.array([0 for i in range(self.dimensions[0])])\n self.cost = [0 for i in range(6)]\n #Extrapos for CTS LOS\n self.extrapos = []", "def reset(self):\n self.params.resetParams()", "def reset(self):\n # FIXME: this state does not make sense\n self.reset_creation_info()\n self.reset_document()\n self.reset_package()\n self.reset_file_stat()\n self.reset_reviews()\n self.reset_annotations()\n self.reset_extr_lics()", "def reset_parameters(self):\n \n for i in range(self.num_layers):\n getattr(self, 'LSTMCell%i'%(i+1)).reset_parameters()", "def reset_epoch(self):\n self.ix = 0", "def _hard_reset(self):\n self._reset_specific_envs(np.ones_like(self.episodes_done))\n self._update_other_info()", "def reset(self):\n if self._key:\n self._lib.StObjectReset(self._key)\n os.chdir(self._cwd)\n self._layers.clear() # 
layer: index\n self._substrate = None\n self._experiments.clear() # analyzed experiments\n self._tmpstandards.clear()", "def reset_parameters(self):\n model_utils.truncated_normal_(self.weight, mean=0.0, std=0.1)\n model_utils.truncated_normal_(self.bias, mean=0.0, std=0.1)", "def reset(self):\n self.close()\n self._sess = tf.Session(graph=self._graph)\n self._sess.run(self._initializer)", "def turn_off_learning(self):\n self.epsilon = 0\n self.alpha = 0", "def reset(self):\n\t\tself.graph = OrderedDict()\n\t\tself.bottoms = OrderedDict()\n\t\tself.output_shape = OrderedDict()\n\t\tself.cur_tensor = None\n\t\tself.cur_id = None\n\t\tself.tmp_list = []\n\t\tself.log_init()", "def forget(self):\n self.initialize(self.input_size, self.targets)\n self.epoch = 0", "def _reset(self) -> None:\n self.images = []\n self.activations = []\n self.labels = []\n self.preds = []\n self.n_found = 0", "def reset(self):\n ih = (param for name, param in self.named_parameters() if 'weight_ih' in name)\n hh = (param for name, param in self.named_parameters() if 'weight_hh' in name)\n b = (param for name, param in self.named_parameters() if 'bias' in name)\n for t in ih:\n torch.nn.init.xavier_uniform_(t)\n for t in hh:\n torch.nn.init.orthogonal_(t)\n for t in b:\n torch.nn.init.constant_(t, 0)", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.num_epochs):\n print(\"EPOHA\")\n self.run_epoch()\n print(\"SAVE MODEL\")\n self.save_model()", "def reset(self):\n Simulation.reset(self)", "def _reset_histories(self):\n\t\tself.train_loss_history = []\n\t\tself.train_acc_history = []\n\t\tself.val_acc_history = []\n\t\tself.val_loss_history = []", "def setup_training(self):\n print('setup training called')\n self.steps_done = 0\n self.current_episode_num = 1\n self.total_reward = 0\n\n # self.optimizer = optim.RMSprop(policy_net.parameters())\n self.memory = ReplayMemory(300000)\n self.total_reward_history = []\n # self.loss_history = []\n self.positions = []\n self.n_destroyed_crates = 0\n self.is_in_bomb_range = False", "def reset_weights(self):\n self.head.reset_weights()", "def initialize_training(self, training_info):\n self.model.reset_weights()\n self.algo.initialize(self.settings, model=self.model, environment=self.environment, device=self.device)", "def reset(self):\n self.num_steps = 0\n self.world_state = self.action = None", "def reset(self):\n self.view.running = False\n if not self.validInput:\n self.view.defaultTextCoordinates()\n self.validInput = False\n self.view.clearCanvas()\n self.view.stopView()\n self.model.coordinates.clear()\n self.bestPath.clear()\n self.bestDistance = float(\"inf\")\n self.refresh_time = REFRESH_TIME_MS\n self.iterationCounter = 0\n self.view.updateLabels(0, float(\"inf\"))", "def reset_parameters(self):\n self.rnn.reset_parameters()\n self.action_lookup.reset_parameters()\n\n # self.state_dict_lookup.reset_parameters()\n self.own_c_lookup.reset.parameters()\n self.own_s_lookup.reset_parameters()\n\n self.th_1_lookup.reset_parameters()\n self.th_2_lookup.reset_parameters()\n self.th_3_lookup.reset_parameters()\n self.f_1_lookup.reset.parameters()\n self.f_2_lookup.reset_parameters()\n self.f_3_lookup.reset_parameters()\n self.f_4_lookup.reset_parameters()\n\n self.bu_msg_lookup.reset_parameters()\n\n self.i_t_lookup.reset_parameters()\n self.lives_lookup.reset_parameters()\n\n self.prev_action_lookup.reset_parameters()\n # self.message.apply(weight_reset)\n self.output.apply(weight_reset)\n for p in 
self.rnn.parameters():\n p.data.uniform_(*self.init_param_range)", "def _reset(self):\n self._set(\"_n_init_features\", None)\n self._set(\"_n_output_features\", None)\n self._set(\"_n_intervals\", None)\n self._set(\"_mapper\", {})\n self._set(\"_cpp_preprocessor\", None)\n self._set(\"_fitted\", False)", "def reset(self):\n self.restart()\n self.cycles = 0", "def reset(self):\r\n # reset Wheel encoders\r\n self.start_time = time.time()\r\n [left_start, right_start] = self.Roomba.Query(43, 44)\r\n self.Motion.reset(left_start, right_start)\r\n # reset bumper\r\n self.bumper.reset()\r\n\r\n #reset grid world data\r\n self.action=[0.0,0.0]\r\n self.grid_state= [0,0,0]\r\n self.real_state = [0.0, 0.0, 0.0]\r\n self.trans_model = None\r\n pass", "def reset(self):\r\n err = self._cfuncs['ka_reset'](self._core._get_ka())\r\n self._core._handle_error(err)", "def reset(self) -> None:\n # See section 7.2.2 of the datasheet for reset description.\n self._reset.value = True\n time.sleep(0.0001) # 100 us\n self._reset.value = False\n time.sleep(0.005) # 5 ms", "def reset_epoch_cache(self):\n self.epoch_cache = {\"train\":PerformanceBatch(), \n \"val\":PerformanceBatch(), \n \"test\":PerformanceBatch()}", "def reset(self, batch_size=None, is_new_epoch=False):\n if is_new_epoch:\n self.epoch += 1\n\n self.batch_sampler.reset(batch_size, epoch=self.epoch)" ]
[ "0.80868524", "0.7918954", "0.7916748", "0.7766766", "0.7735061", "0.76176995", "0.7499007", "0.7421006", "0.7351169", "0.72698146", "0.7228589", "0.7210134", "0.71855277", "0.70681477", "0.701447", "0.6989159", "0.69768757", "0.6957746", "0.6951352", "0.6948265", "0.69307107", "0.6917882", "0.69028765", "0.69028765", "0.6899475", "0.68962693", "0.6867698", "0.68386054", "0.68189144", "0.6818599", "0.6809041", "0.68071043", "0.6799725", "0.6781363", "0.67684066", "0.6762392", "0.6745881", "0.67348844", "0.67340916", "0.67321664", "0.672256", "0.6720793", "0.6705599", "0.6699408", "0.6680852", "0.66770285", "0.66767514", "0.66648793", "0.6654189", "0.6646928", "0.6644415", "0.6644187", "0.66319156", "0.6626839", "0.6614425", "0.65993017", "0.65991765", "0.6584359", "0.6582421", "0.65811354", "0.6572368", "0.6563977", "0.6555313", "0.65465254", "0.65386325", "0.65258753", "0.65180016", "0.65180016", "0.65180016", "0.6499068", "0.64872235", "0.6484481", "0.64838403", "0.648364", "0.6482987", "0.64765817", "0.647203", "0.6470796", "0.6458869", "0.6456978", "0.6452638", "0.6452548", "0.6450505", "0.64491427", "0.6441727", "0.64381623", "0.64314663", "0.643028", "0.6429857", "0.64280486", "0.6426257", "0.64260054", "0.64199215", "0.641338", "0.6411156", "0.6402492", "0.64017564", "0.6397659", "0.6396677", "0.6393761" ]
0.9016183
0
Save only the state dict of the model.
def save_weights(self, location: str):
    # import torch
    torch = import_optional_dependency("torch")
    torch.save(self.model.state_dict(), self._set_save_location(location))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_state(self):\n pass", "def save_state(self) -> None:\n raise NotImplementedError(\"Save state is is not implemented.\")", "def saveState(self) -> None:\n # TODO: Saves State\n pass", "def save(self, *args, **kwargs):\n if self.state: self.state.save()", "def save_state(self, safe=True):\n self.state.save(safe=safe)", "def save(self):\n\n if not self.revertable:\n return\n\n state = {}\n for x in self.toSave:\n state[x] = deepcopy(self.toSave[x]())\n\n #made a new model, reparent it so it displays\n state[\"model\"].reparentTo(base.render)\n\n #add it to the stack\n self.stack.append(state)\n\n for s in self.stack:\n s[\"model\"].setPos(s[\"model\"].getPos() + Vec3(0,0,-THING_REVERT_DISTANCE))", "def save_checkpoint(self) -> Dict[str, Union[Dict[str, torch.Tensor], dict]]:\n if isinstance(self.model, nn.DataParallel) or isinstance(self.model, nn.parallel.DistributedDataParallel):\n model = self.model.module.state_dict()\n else:\n model = self.model.state_dict()\n\n checkpoint = {\n \"model_state_dict\": deepcopy(model),\n \"optimizer_state_dict\": deepcopy(self.optimizer.state_dict()),\n }\n return checkpoint", "def savestate(self, state):\n pass", "def __getstate__(self):\n import time\n\n state = self.__dict__.copy()\n\n # Remove the unpicklable entries\n del state['_model']\n del state['_input']\n del state['_output']\n del state['_preds_op']\n del state['_loss']\n del state['_loss_grads']\n del state['_preds']\n del state['_layer_names']\n\n model_name = str(time.time()) + '.h5'\n state['model_name'] = model_name\n self.save(model_name)\n return state", "def save_state_to_dict(self):\n return self.__dict__.copy()", "def save_model(self):\n pass", "def _save_state(self) -> None:\n state_file = self._get_state_file()\n logger.info(\"Saving state to %s\", state_file)\n\n data = {}\n data[\"version\"] = mopidy.__version__\n data[\"state\"] = CoreState(\n tracklist=self.tracklist._save_state(),\n history=self.history._save_state(),\n playback=self.playback._save_state(),\n mixer=self.mixer._save_state(),\n )\n storage.dump(state_file, data)\n logger.debug(\"Saving state done\")", "def save_model(self, path=\"/model\"):\n state = {\n 'epoch': self.epoch_counter,\n 'state_dict': self.net.state_dict(),\n 'optimizer': self.optimizer.state_dict()\n }\n torch.save(state, path)", "def _save_state(self):\n try:\n param_value = json.dumps({\n self._TIME_KEY: self.last_timestamp,\n self._STATE_KEY: self.current_state,\n self._CONTEXT_KEY: self.context,\n })\n except TypeError as err:\n raise AppStateError('Could not serialize state for name \\'{}\\'. Error: '\n '{}'.format(self._state_name, str(err)))\n\n @backoff.on_exception(backoff.expo,\n ClientError,\n max_tries=self.MAX_STATE_SAVE_TRIES,\n jitter=backoff.full_jitter)\n def save():\n \"\"\"Function to save the value of the state dictionary to parameter store\"\"\"\n self.SSM_CLIENT.put_parameter(\n Name=self._state_name,\n Description=self._STATE_DESCRIPTION.format(self._app_type, self.function_name),\n Value=param_value,\n Type='SecureString',\n Overwrite=True\n )\n try:\n save()\n except ClientError as err:\n raise AppStateError('Could not save current state to parameter '\n 'store with name \\'{}\\'. 
Response: '\n '{}'.format(self._state_name, err.response))", "def save( self ):\n try:\n state_dict = {\n 'epoch': self.epoch,\n 'epoch_loss': self.epoch_loss,\n 'global_step': self.global_step,\n 'mechanism_weights': self.mechanism_weights, # Save row.\n 'router_state': self.router.state_dict(), # Save router state.\n 'nucleus_state': self.nucleus.state_dict(), # Save nucleus state.\n 'optimizer_state': self.optimizer.state_dict(), # Save optimizer.\n }\n torch.save( state_dict, \"{}/model.torch\".format( self.config.neuron.full_path, self.epoch_loss ) )\n bittensor.logging.success(prefix='Saved model', sufix='<blue>{}/model.torch</blue>'.format( self.config.neuron.full_path ) )\n except Exception as e:\n logger.exception('Failed to save model with error:{}', e)", "def save(self, dest: str) -> None:\n # Get the state dictionary\n model_state = self.state_dict()\n\n # Add some information for our specific module:\n model_state['additional_state'] = {}\n model_state['additional_state']['configuration'] = self._configuration\n\n # Serialize model\n torch.save(model_state, dest)", "def save_to_checkpoint(self, chkpt):\n chkpt[self.name] = self.state_dict()", "def writeState(self, saveState: ghidra.framework.options.SaveState) -> None:\n ...", "def saveModel(self):\n with open(self.modelSaveFile, 'wb') as f:\n pickle.dump(self.values, f, pickle.HIGHEST_PROTOCOL)\n pickle.dump(self.policy, f, pickle.HIGHEST_PROTOCOL)", "def saveStateOfThisRun(self):\n with open('stateFile.json', 'w') as statefile:\n json.dump(self.fileTobeUploaded, statefile, indent=4)", "def _get_model_state(self) -> dict:\n return dict(model=self.model, kwargs=self._model_kwargs)", "def save(self, path):\n torch.save({\n 'model_state_dict': self.state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict(),\n }, path)", "def save(self, path):\n torch.save({\n 'model_state_dict': self.state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict(),\n }, path)", "def save(self):\n try:\n torch.save(self.model.state_dict(), os.path.join(self.save_path, \"save_point.pth\"))\n except:\n print(\"Unable to save the model\")", "def save(self, path=\"./trained_model.checkpoint\"):\n torch.save({\"state_dict\":self.working_q.state_dict}, path)", "def save_state(self, name):\n self.saved_states[name] = self.state.copy()", "def save_states(self, checkpoint):\n raise NotImplementedError()", "def make_save(self):\n\t\tsave = {}\n\t\tsave['p'] = self.p\n\t\tsave['injail'] = self.injail.copy()\n\t\tsave['tile'] = self.tile.copy()\n\t\tsave['bal'] = self.bal.copy()\n\t\tsave['goojf'] = self.goojf.copy()\n\t\tsave['isalive'] = self.isalive.copy()\n\t\tsave['jailturn'] = self.jailturn.copy()\n\t\tsave['ownedby'] = self.ownedby.copy()\n\t\tsave['numhouse'] = self.numhouse.copy()\n\t\tsave['ismortgaged'] = self.ismortgaged.copy()\n\t\tsave['num'] = self.num\n\t\tsave['numalive'] = self.numalive\n\t\tsave['uid'] = self.uid.copy()\n\t\tsave['freeparkingsum'] = self.freeparkingsum\n\t\tself.autosave = save", "def saveState(self, name):\n return self.parameters()", "def _save(self) -> None:\n self._saved_record = copy.deepcopy(self.record)\n self._saved_counter = copy.deepcopy(self.counter)\n self._saved_objects = copy.deepcopy(self.current_objects)", "def save(self):\n self.presavemodel()\n self.dbm().model_save(self)\n self.set_isdirty(False)\n # we might be smart about flushing when there is no id, so that saving a new model gets it's unique id\n if (self.id == None):\n self.flush_toupdate()", "def save (self):\n pass", "def 
save(self):\n pass", "def save(self):\n pass", "def save(self):\n pass", "def save(self):\n pass", "def save(self):\n pass", "def save_state(self):\n\t\tself._history['time'].append(self.t)\n\t\tstate = np.array(self.x[np.newaxis,:,:])\n\t\tself._history['state'] = np.vstack([self._history['state'],state])", "def save():\n pass", "def save(self):\n raise NotImplementedError", "def save(self):\n raise NotImplementedError", "def save(self):\n raise NotImplementedError", "def save(self):\r\n # torch.save(self.model.state_dict, os.path.join(self.ckpt_dir, 'best_model_state_dict.pt'))\r\n torch.save(self.model, os.path.join(self.ckpt_dir, 'best_model_INN.pt'))", "def saveGame(self) -> None:\n self.state[\"phase\"] = self._phase\n\n state_as_string = json.dumps(self.state)\n with open(self.save_location, \"w\") as File:\n File.write(state_as_string)", "def save(self):\n if self._dirty:\n # key and timestamp\n data = {\n InstanceStates.INSTANCE_TABLE_NAME: self._service,\n InstanceStates.INSTANCE_TABLE_ACCOUNT_REGION: self._current_account_region,\n InstanceStates.INSTANCE_TABLE_TIMESTAMP: Decimal(time.time())\n }\n\n # store instance states as one column per instance\n for i in self._state_info:\n data[i] = self._state_info[i]\n\n # instances to purge\n if len(self._instances_to_purge) > 0:\n data[InstanceStates.INSTANCE_TABLE_PURGE] = self._instances_to_purge\n\n self.state_table.put_item_with_retries(Item=data)\n self._dirty = False", "def saveState( self, state ):\n with open( self.settings.statusFilepath(), 'w' ) as statusFile:\n json.dump( {\n 'state': state\n }, statusFile )", "def save(self):\n\n pass", "def save_state(self):\n return {'log_formatstr': self.log_formatstr,\n 'backend_interval': self.backend_interval}", "def _log_model_state(self):\n\n state = {\n 'model_state': self.state_dict(),\n 'model_name': type(self).__name__,\n 'optimizer_state': self.optimizer.state_dict(),\n 'optimizer_name': type(self.optimizer).__name__,\n }\n\n return state", "def save(self, *args, **kwargs):\n if self.id is not None and 'update_fields' not in kwargs:\n kwargs['update_fields'] = self.non_state_fields\n super().save(*args, **kwargs)", "def save(self):\n raise NotImplementedError()", "def _get_model_state(self) -> dict:\n raise NotImplementedError", "def save_checkpoint(self, model):\n # print(f\"save model {self.save_model_path}\")\n torch.save(model.state_dict(), self.save_model_path)", "def save_state(self):\r\n state = {\r\n 'next_student_module_id': self.next_student_module_id,\r\n }\r\n with open(self.STATE_FILE, \"w\") as state_file:\r\n json.dump(state, state_file)\r\n self.say(\"Saved state: {}\".format(json.dumps(state, sort_keys=True)))", "def _save_model(self):\n save_generic(self.model, self.model_pkl_fname)", "def save(self):\n return Memento(self.state)", "def state_dict(self):\r\n return {'ImageModel': self.image_model.state_dict(),\r\n 'QuestionModel': self.question_model.state_dict(),\r\n 'AttentionModel': self.attention.state_dict()\r\n }", "def save():", "def save(self):\n # TODO (Pierre): code", "def serialize_dirty(self):\n pass", "def save_(self):\n if not self._edited:\n return\n data = {'history': self.dump()}\n with open(os.path.join(os.path.dirname(self.arch_handler.dicomdir_path), self.SAVE_NAME), \"w\") as outfile:\n json.dump(data, outfile)\n self._edited = False", "def save(self) -> None:\n pass", "def save(self) -> None:\n pass", "def save(self) -> None:\n pass", "def _post_training(self):\n self._write_state_key()", "def save_data(self):\n pass", "def 
save(self):\n state = {\n \"log_manager\": self.log_manager.save(),\n \"dispatchers\": [dispatcher.save() for dispatcher\n in asyncore.socket_map.itervalues()\n if hasattr(dispatcher, \"save\")],\n }\n return state", "def get_model_state(self) -> dict:\n return {\n '__type__': CheckpointType.MODEL.value,\n '__cls__': self.__class__.__name__,\n 'state': self._get_model_state(),\n }", "def save(self):\n self.backend.save(list(self._d.items()))\n log.debug(\"save: {}\".format(self.backend.filename))", "def save_model_state(self, metric, model):\n torch.save(model.state_dict(), 'checkpoint.pt')\n self.best_metric = metric", "def save_state():\n logger.debug(\"called\")\n pwd_gate.save()\n preferences.save()\n shareBuffer.save()\n contacts.save()\n secrets.save()", "def save(self, path: str):\n torch.save(self.model.state_dict(), path)", "def save(self, path: str):\n torch.save(self.model.state_dict(), path)", "def save_model(self):\n if self.model:\n self.model.save(self.config[\"model_path\"])", "def _saveState(self, fname=None, save_backup=True):\n if fname is None:\n fname = self.filename\n filepath = Path(fname).resolve()\n\n # it is good to backup this file in caseit exists\n if save_backup:\n if filepath.exists(): # pylint: disable=no-member\n # gets folder/filename.* and transforms into folder/filename_{timestamp}.json\n filepath_backup = Path(filepath).with_name(\n \"{}_{}.json\".format(filepath.stem, timestamp_string()))\n logger.debug(\"Backup %s to %s\", filepath, filepath_backup)\n shutil.copy2(filepath, filepath_backup)\n\n # save to filepath, overwriting\n filepath.touch() # pylint: disable=no-member\n with open(filepath, 'w') as file:\n json_state = self.__toJSON()\n file.write(json.encode(json_state))\n self.__sha256__ = json_state[\"__sha256__\"]\n logger.debug(\"%s's sha: %s\", fname, json_state[\"__sha256__\"])", "def __getstate__(self):\r\n\r\n d = copy.copy(self.__dict__)\r\n del d['_room_table_model'] # Do not save easily re-creatable table models\r\n del d['_socket_table_model'] # Do not save easily re-creatable table models\r\n del d['_fuse_table_model'] # Do not save easily re-creatable table models\r\n del d['_fuse_tree_item_model']\r\n del d['_invoker']\r\n\r\n return d", "def save(self, path):\n save(self.actor_net.state_dict(), path + '_actor.pkl')\n save(self.critic_net.state_dict(), path + '_critic.pkl')", "def saveState(self,filename=None):\n # For now we just use pickle for convenience. 
In the future, could use np.savez or HDF5 (or FITS)\n if filename is None:\n if self.statefile:\n filename = self.statefile\n else:\n filename = self.filename + '.cysolve.pkl'\n orig_statefile = self.statefile\n orig_ar = self.ar\n self.ar = None\n fh = open(filename,'w')\n cPickle.dump(self,fh,protocol=-1)\n fh.close()\n self.ar = orig_ar\n self.statefile = orig_statefile\n print \"Saved state in:\", filename", "def _load_state(self, model_state: dict) -> bool:\n model = model_state.get('model')\n kwargs = model_state.get('kwargs')\n assert model.__class__ == self.model.__class__, \"Used another model in the save file\"\n assert kwargs == self._model_kwargs, \"Used different model params in the model state\"\n self.model = model\n self._model_kwargs = kwargs\n return True", "def _save_state(self, config_path=\"\"):\n state = {\n 'output_dir': str(self.ui.lineEdit_output_path.text()),\n 'device': str(self.ui.comboBox_device.currentText()),\n 'event_bits': str(self.ui.lineEdit_data_bits.text()),\n 'freq': str(self.ui.lineEdit_pulse_freq.text()),\n 'labels': self.digital_labels,\n 'timestamp': self.ui.checkBox_timestamp.isChecked(),\n 'comm_interface': self.ui.checkBox_comm.isChecked(),\n 'analog_on': self.ui.checkBox_analog_channels.isChecked(),\n 'analog_channels': eval(str(self.ui.lineEdit_analog_channels.text())),\n 'analog_sample_rate': self.ui.comboBox_analog_freq.currentIndex(),\n 'analog_dtype': self.ui.comboBox_dtype.currentIndex(),\n 'analog_labels': self.analog_labels,\n 'counter_input_terminal': str(self.ui.comboBox_ci.currentText()),\n 'counter_input_on': self.ui.checkBox_ci.isChecked(),\n }\n if not config_path:\n config_path = LAST_SESSION\n with open(config_path, 'wb') as f:\n pickle.dump(state, f)", "def save(self):\n if self._mode == 'dict':\n self._mode = 'shelve'\n self._shelve_mode = 'c'\n\n for key, value in self._dict.items():\n ckey = copy.copy(key)\n cvalue = copy.copy(value)\n self.add(ckey, cvalue, 'shelve', check=False)\n\n self._dict.clear()\n\n if self._mode == 'dict':\n self._mode = 'dict'\n self._shelve_mode = 'r'", "def save_model(self):\n joblib.dump(self.pipeline, \"model.joblib\")", "def _save_state(self):\n with open(self.histFile,'wb') as hf:\n hf.write(self.dbFile.Value)", "def _save_model(self):\n with open(self.filepath, 'wb') as file:\n pickle.dump(self.cmodel, file)", "def saveState(self, name):\n import copy\n d = copy.deepcopy(self._params)\n d['pulses'] = None\n return self._params", "def _save(self):\n self.logger.debug(\"Saving to persistence\")\n try:\n data = self.persistence_serialize()\n except NotImplementedError:\n # allow backwards compatibility or persisted_values way\n # generate item to be persisted by gathering all variables\n # to be persisted into a dictionary\n data = {persisted_var: getattr(self, persisted_var)\n for persisted_var in self.persisted_values()}\n\n # save generated dictionary under block's id\n self._persistence.save(data, self.id())", "def save_state(self):\n state_dir = path.dirname(self.state_filename)\n\n if not path.isdir(state_dir):\n os.makedirs(state_dir)\n\n with open(self.state_filename, 'w') as df:\n log.debug(\"Saving state of program %s to %s\" % (self.name, self.state_filename))\n yaml.safe_dump(self.state, df, default_flow_style=False)", "def save(self, path):\n save_dict = {\n 'model': {\n 'vocabulary': self.vocabulary,\n 'max_sequence_length': self.max_sequence_length\n },\n 'decorator': {\n 'params': self.network.get_params(),\n 'state': self.network.state_dict()\n }\n }\n torch.save(save_dict, 
path)", "def saveState(self, file):\n state = self.context.getState(getPositions=True, getVelocities=True, getParameters=True, getIntegratorParameters=True)\n xml = mm.XmlSerializer.serialize(state)\n if isinstance(file, str):\n with open(file, 'w') as f:\n f.write(xml)\n else:\n file.write(xml)", "def save_model(self, request, obj, form, change):\n obj.revise()", "def save_model(self, request, instance, form, change):\n pass", "def copy_model_state(model):\n model_state = deepcopy(model.state_dict())\n return model_state", "def save(self, *args, **kwargs):\n return", "def save(self):\n return None", "def save(self):\n self.rpc.call(MsfRpcMethod.CoreSave)", "def _save_transform_state(books):\n utils.clear_data()\n transform_state = {\n 'current_state': 1,\n 'books': books\n }\n with open(f'{PATH_TO_STATE}/current_state.json', 'w') as f:\n json.dump(transform_state, f)", "def _save_state(self):\n with open(os.path.join(self._workdir, '.git', 'drover'), 'wb') as f:\n cPickle.dump(self, f)", "def save(self) -> Any:\n if self._unsaved_values:\n requestor = Requestor(local_api_key=self._api_key)\n params = {}\n for k in self._unsaved_values:\n params[k] = getattr(self, k)\n if type(params[k]) is EasyPostObject:\n params[k] = params[k].flatten_unsaved()\n params = {self.snakecase_name(): params}\n url = self.instance_url()\n response, api_key = requestor.request(method=RequestMethod.PATCH, url=url, params=params)\n self.refresh_from(values=response, api_key=api_key)\n\n return self", "def save_model(self, *args, **kwargs):\n raise NotImplementedError", "def save_state(self, training_state: _TrainingState, fname: str):\n with open(fname, \"wb\") as fp:\n pickle.dump(training_state, fp)", "def checkpoint(self):\n save()" ]
[ "0.80469704", "0.7924474", "0.789037", "0.78862095", "0.7530521", "0.7401246", "0.7255777", "0.7228237", "0.72169715", "0.7137653", "0.7022203", "0.70193565", "0.7004299", "0.7002671", "0.69508386", "0.6921392", "0.6888518", "0.6883776", "0.68588847", "0.6837567", "0.6811201", "0.6759196", "0.6759196", "0.67527074", "0.67514", "0.6741906", "0.6720072", "0.67054486", "0.6691979", "0.6682659", "0.66785884", "0.6671979", "0.6665098", "0.6665098", "0.6665098", "0.6665098", "0.6665098", "0.66349655", "0.66343784", "0.6631988", "0.6631988", "0.6631988", "0.66268367", "0.66230303", "0.6614085", "0.6591367", "0.6586865", "0.65840286", "0.6578872", "0.6559195", "0.65534866", "0.6534208", "0.653229", "0.65240914", "0.65059364", "0.65037954", "0.6499068", "0.64917964", "0.6483454", "0.6483294", "0.6475032", "0.64733183", "0.64733183", "0.64733183", "0.64687896", "0.64642584", "0.6457122", "0.6455557", "0.6452316", "0.64390105", "0.6413199", "0.6396398", "0.6396398", "0.6392507", "0.6369182", "0.6368471", "0.6366778", "0.63650984", "0.6357137", "0.63488656", "0.6348086", "0.634104", "0.63267004", "0.62918264", "0.62814075", "0.62801427", "0.6276681", "0.6275366", "0.6260881", "0.62571615", "0.6248361", "0.6247425", "0.62458843", "0.6244243", "0.6243044", "0.6242494", "0.623774", "0.623744", "0.6235394", "0.623391", "0.62146115" ]
0.0
-1
Save only the whole model.
def save_model(self, location: str): # import torch torch = import_optional_dependency("torch") torch.save(self.model, self._set_save_location(location))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_model(self):\n pass", "def save(self):\n self.presavemodel()\n self.dbm().model_save(self)\n self.set_isdirty(False)\n # we might be smart about flushing when there is no id, so that saving a new model gets it's unique id\n if (self.id == None):\n self.flush_toupdate()", "def save_model(self):\n if self.model:\n self.model.save(self.config[\"model_path\"])", "def save_model( self, request, obj, form, change ):\n obj.save()", "def _save_model(self):\n save_generic(self.model, self.model_pkl_fname)", "def save_model(self, *args, **kwargs):\n raise NotImplementedError", "def save(self):\n\n pass", "def save(path_to_model):\n pass", "def save(self):\n pass", "def save(self):\n pass", "def save(self):\n pass", "def save(self):\n pass", "def save(self):\n pass", "def save_model(self, request, instance, form, change):\n pass", "def save (self):\n pass", "def save(self):\n print(\"==> Saving model to\", self.model_dir)\n self.model.save(self.model_dir)", "def saveModel(self):\n log.info(\"Saving model to %s...\" % self.savedModelsPath)\n self.model.save(self.savedModelsPath)", "def save_model(self, path):\n pass", "def save(\n self,\n modelSavePath\n ):\n pass", "def save(self):\n # TODO (Pierre): code", "def save():\n pass", "def save(self) -> None:\n pass", "def save(self) -> None:\n pass", "def save(self) -> None:\n pass", "def save():", "def save(self):\n raise NotImplementedError", "def save(self):\n raise NotImplementedError", "def save(self):\n raise NotImplementedError", "def save_model(self, model_path: str):", "def save_model(self, path):\n self._model.save(path)", "def saveModel(self):\n with open(self.modelSaveFile, 'wb') as f:\n pickle.dump(self.values, f, pickle.HIGHEST_PROTOCOL)\n pickle.dump(self.policy, f, pickle.HIGHEST_PROTOCOL)", "def save(self, obj):", "def save(self, *args, **kwargs):\n return", "def save(self):\n raise NotImplementedError()", "def save(self, *args, **kwargs):\n pass", "def save(self, *args, **kwargs):\n self.full_clean()\n super().save(*args, **kwargs)", "def save(self, *args, **kwargs):\n self.full_clean()\n super().save(*args, **kwargs)", "def save_model(self, file=None):\n return None", "def save_model(self, request, obj, form, change):\n obj.revise()", "def save(self):\n return None", "def save(self, *args, **kwargs) -> None:\n pass", "def save(self, *args, **kwargs) -> None:\n pass", "def save(self, *args, **kwargs) -> None:\n pass", "def saveModel(self):\n self._model.save_weights('./my_model')\n return None", "def _save(self, step, model):\n\n raise NotImplementedError()", "def save_model(self):\n joblib.dump(self.pipeline, \"model.joblib\")", "def save(self, *args, **kwargs):\n super().save(*args, **kwargs)", "def save(self, *args, **kwargs):\n super().save(*args, **kwargs)", "def save(self, datastore, model):\n datastore.add(model)\n datastore.flush()\n return model", "def save(self, *args, **kwargs):\n super(self.__class__, self).save(*args, **kwargs)", "def save_model(obj):\n obj.full_clean(validate_unique=False)\n obj.save()", "def save(self):\n self.rpc.call(MsfRpcMethod.CoreSave)", "def save(self, obj):\n raise NotImplementedError", "def save(self, *args, **kwargs):\n\n if self.id:\n firstcreation = False\n else:\n firstcreation = True\n\n #common save functionality for all models\n self._save_base()\n self.save_default(firstcreation)\n super(ComicSiteModel,self).save()", "def save(self, request=None):\n return super().save()", "def save(self, to_file: Path)->None:\n self.model.save(to_file)", "def save(self, *args, **kwargs) -> 
Any:\n pass", "def save(self):\n return api.put([self])", "def saveModel(self):\n for feature in self.features:\n featureName = feature[\"name\"]\n modelProbs = self.model[featureName][\"probabilities\"]\n modelFreqs = self.model[featureName][\"frequencies\"]\n repository.saveProbabilites(modelProbs, self.modelName, featureName, self.modelClass)\n repository.saveFrequences(modelFreqs, self.modelName, featureName, self.modelClass)", "def save(self, model, path):\n _type = self.guess_type(path)\n if _type != \"notebook\":\n return super().save(model, path)\n else:\n shallow_model, splitted = self._split_model(model)\n super().save(splitted, self._get_splitted_uri(path))\n return super().save(shallow_model, path)", "def save_model(self, filename):\r\n pass", "def save(self):\n pass", "def save(self):\n pass", "def save_now(self):\r\n self.save()", "def save_now(self):\r\n self.save()", "def save(self):\n self.db.commit()", "def _save_model(self):\n with open(self.filepath, 'wb') as file:\n pickle.dump(self.cmodel, file)", "def save(self):\n if self.id:\n self.update()\n else:\n self.create()", "def save_model(self, fname):\n self.get_booster().save_model(fname)", "def save(self, path):\n for i, m in enumerate(self.model_save):\n m.save(os.path.join(path, str(i) + \"-\" + m.name))", "def save_model(self):\n\n # =============================================================\n # Default : pickle the trained model. Change this (and the load\n # function, below) only if the library you used does not support\n # pickling.\n # self.Model_made.save(\"Model_made.h5\")\n # self.Model_claim.save(\"Model_claim.h5\")\n # Model_made = self.Model_made\n # Model_claim = self.Model_claim\n # self.Model_made = None\n # self.Model_claim = None\n with open('pricing_model.p', 'wb') as target:\n pickle.dump(self, target)\n\n # self.Model_made = Model_made\n # self.Model_claim = Model_claim\n\n # zipObj = ZipFile(\"model.zip\",\"w\")\n # zipObj.write(\"Model_made.h5\")\n # zipObj.write(\"Model_claim.h5\")\n # zipObj.write(\"pricing_model.p\")\n # zipObj.close()", "def save_model(model, model_filepath):", "def save(self, model_file):\n pickle.dump(self, open(model_file, 'wb'))", "def _save_model(self):\n groups = {cluster: self.model.cluster_metadata.group(cluster)\n for cluster in self.cluster_ids}\n self.model.save(self.model.spike_clusters,\n groups,\n clustering_metadata=self.model.clustering_metadata,\n )\n info(\"Saved {0:s}.\".format(self.model.kwik_path))", "def save(self):\n self.session.commit()", "def save(self, *args):\n # need to do!!\n pass", "def save(self):\n self.__db.commit()", "def save(self, db):\n pass", "def save(self, file_path):\n self.model.save(file_path)", "def save(self, file_path):\n self.model.save(file_path)", "def save(self, file_path):\n self.model.save(file_path)", "def save_model(self, model):\n # get model file name\n root_dir = os.path.split(os.path.realpath(__file__))[0]\n model_path = os.path.join(root_dir, '..', 'common', 'model', self._this_party, self._task_chain_id)\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n model_file_name = os.path.join(model_path, self._task_id + '.model')\n\n # save model to disk\n dump(model, model_file_name)", "async def save(self, model: T) -> T:\n if not isinstance(model, DeclarativeModel):\n raise TypeError(f\"save not supported for object of type {type(model)}\")\n return (await self.save_all([model]))[0]", "def save_model(cls, vocab, path, filename):\n return super().save_model(vocab, path, filename)", "def save(self, *args, 
**kwargs):\n raise NotImplementedError()", "def save_models(self):\n\n if self.keep_models_fixed:\n return\n\n super().save_models(os.path.join(MODELS_PATH, self.name))\n if not hasattr(self, 'train_checkpointer'):\n self._create_train_checkpointer()\n self.train_checkpointer.save(0)", "def save(self):\n\n if not self.revertable:\n return\n\n state = {}\n for x in self.toSave:\n state[x] = deepcopy(self.toSave[x]())\n\n #made a new model, reparent it so it displays\n state[\"model\"].reparentTo(base.render)\n\n #add it to the stack\n self.stack.append(state)\n\n for s in self.stack:\n s[\"model\"].setPos(s[\"model\"].getPos() + Vec3(0,0,-THING_REVERT_DISTANCE))", "def save(self):\r\n if self.__abstract__:\r\n raise ThunderdomeException('cant save abstract elements')\r\n self.pre_save()\r\n return self", "def save(self):\n db.session.commit()", "def save(self, main_dir):\n with open(f'{main_dir}/models/model_N{self.N}.pkl', 'wb') as f:\n pickle.dump(self.model, f)", "def save(self) -> None:\n self.saver.save_model_and_weights(self.model)\n self.saver.save_data_shuffle_indices(\n self.data.eval_shuffler.ds_inds\n )\n self.saver.save_input_scaler(self.data.x.scaler)", "def save(self,model_path):\n pass\n # filename = \"Models/\"+model_path+\"1.sav\"\n # pickle.dump(self.crf_model, open(filename, 'wb'))", "def save_model(self, path):\n # Save server model\n self.server_model.set_params(self.model)\n model_sess = self.server_model.sess\n return self.server_model.saver.save(model_sess, path)", "def save(model: nn.Module, path):\n save_model(model, path)", "def save_model(program, model_path):\n fluid.save(program, model_path)\n logger.info(\"Already save model in {}\".format(model_path))", "def save_model(self, filename):\n\t\tpickle.dump(self, open(filename, 'wb'))\n\t\tprint('Model saved in',filename)", "def save_model(self, filename, overwrite=None):\n return\n pickle.dump(\n obj=self._model,\n file=open(filename, \"wb\")\n )", "def save_model(self):\n filename=self.name + '_words'\n file_write(filename, self.words)\n\n filename2=self.name+'_word_lengths'\n file_write(filename2, self.word_lengths)\n\n filename3=self.name+'_stems'\n file_write(filename3, self.stems)\n\n filename4=self.sentence_lengths+'_sentence_lengths'\n file_write(filename4, self.sentence_lengths)\n\n filename5= self.endings+'_endings'\n file_write(filename5, self.endings)", "def save(self, model_path):\n self.encoder.save(model_path)", "def save(self):\n\n self.__session.commit()", "def save(self):\n\n self.__session.commit()" ]
[ "0.81946814", "0.78372055", "0.77880627", "0.7760365", "0.7683972", "0.7658893", "0.7431407", "0.7427803", "0.73944384", "0.73944384", "0.73944384", "0.73944384", "0.73944384", "0.73874027", "0.7360161", "0.73583525", "0.7299202", "0.72941047", "0.72820115", "0.72592044", "0.72398007", "0.7220516", "0.7220516", "0.7220516", "0.71693444", "0.71539897", "0.71539897", "0.71539897", "0.71405715", "0.71309763", "0.7126633", "0.7072769", "0.7066343", "0.7025597", "0.69968545", "0.6986411", "0.6986411", "0.6983768", "0.69144565", "0.6858215", "0.6857918", "0.6857918", "0.6857918", "0.6855667", "0.68443036", "0.684023", "0.6834546", "0.6834546", "0.6829825", "0.68195486", "0.68075806", "0.67890143", "0.6777671", "0.67652357", "0.6739604", "0.67324793", "0.67269444", "0.67044204", "0.6693265", "0.66876656", "0.66688097", "0.6668487", "0.6668487", "0.663645", "0.663645", "0.66335535", "0.661968", "0.6615518", "0.6612191", "0.6604483", "0.6554147", "0.65477514", "0.653817", "0.65367544", "0.65185964", "0.6515693", "0.65103054", "0.64860374", "0.64807713", "0.64807713", "0.64807713", "0.6473659", "0.6469975", "0.64630824", "0.64472026", "0.64448875", "0.6437355", "0.6424221", "0.64209336", "0.6418127", "0.6405442", "0.6401623", "0.6394442", "0.6393614", "0.6387933", "0.63855946", "0.6380246", "0.63759696", "0.6369729", "0.6367501", "0.6367501" ]
0.0
-1
A method for checking the parameters before training, in order to process the training correctly.
def check_parameters(self): torch = import_optional_dependency('torch') if not isinstance(self.model, torch.nn.Module): self._raise_format_error('self.model', 'torch.nn.Module', f'{ type(self.model) }') if not isinstance(self.optimizer, torch.optim.Optimizer): self._raise_format_error('self.optimizer', 'torch.optim.Optimizer', f'{ type(self.optimizer) }') if not isinstance(self.train_dataset, torch.utils.data.DataLoader): self._raise_format_error('self.train_dataset', 'torch.utils.data.DataLoader', f'{ type(self.train_dataset) }') if not isinstance(self.eval_dataset, torch.utils.data.DataLoader): self._raise_format_error('self.eval_dataset', 'torch.utils.data.DataLoader', f'{ type(self.eval_dataset) }')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check(self) -> None:\n # validate training config\n super().check()", "def _check_parameters(self, X):\n _, n_features = X.shape\n\n if self.weights_init is not None:\n self.weights_init = _check_weights(self.weights_init,\n self.n_components)", "def _check_params(self):\n pass", "def _check_params(self):\n if self.k_initial <= 0 :\n raise ValueError('Initial K should be 1 or more.')", "def _check_params(self):\n\n # verify that estimator1 and estimator2 have predict_proba\n if (not hasattr(self.estimator1_, 'predict_proba') or\n not hasattr(self.estimator2_, 'predict_proba')):\n raise AttributeError(\"Co-training classifier must be initialized \"\n \"with classifiers supporting \"\n \"predict_proba().\")\n\n if (self.p_ is not None and self.p_ <= 0) or (self.n_ is not None and\n self.n_ <= 0):\n raise ValueError(\"Both p and n must be positive.\")\n\n if self.unlabeled_pool_size <= 0:\n raise ValueError(\"unlabeled_pool_size must be positive.\")\n\n if self.num_iter <= 0:\n raise ValueError(\"num_iter must be positive.\")", "def check(self) -> None:\n # check existence\n self.check_key_exists()\n\n # validate training config\n TrainConfigValidator(self.config[\"TRAIN_CONFIG\"], log=False).check()\n # if different training policy at prune is not specified\n if \"TRAIN_CONFIG_AT_PRUNE\" not in self.config:\n self.config[\"TRAIN_CONFIG_AT_PRUNE\"] = self.config[\"TRAIN_CONFIG\"]\n TrainConfigValidator(self.config[\"TRAIN_CONFIG_AT_PRUNE\"], log=False).check()\n\n # validate prune config\n self.check_prune_methods()\n\n # if SEED is not specified, set it same as training config's SEED\n if \"SEED\" not in self.config:\n self.config[\"SEED\"] = self.config[\"TRAIN_CONFIG\"][\"SEED\"]\n\n assert 0 < self.config[\"N_PRUNING_ITER\"]\n assert isinstance(self.config[\"N_PRUNING_ITER\"], int)", "def check(self) -> None:\n # validate pruning config\n super().check()\n\n assert self.config[\"TRAIN_CONFIG\"][\"MODEL_NAME\"] in {\n \"densenet\",\n \"quant_densenet\",\n \"simplenet\",\n \"quant_simplenet\",\n }, f\"{self.config['TRAIN_CONFIG']['MODEL_NAME']} is not supported\"", "def check_params(self):\r\n \r\n # TODO: More cases?\r\n\r\n if self.N <= 0:\r\n print('Bad Parameter: N')\r\n \r\n if self.Ha_tally <= 0 or self.Ha_tally > self.N:\r\n print('Bad Parameter: Reported winner tally')\r\n \r\n if len(self.round_sched) < 1 or not self.check_inc_sched(self.round_sched):\r\n print('Bad Parameter: Round Schedule')\r\n\r\n if self.alpha <= 0 or self.alpha >= .5:\r\n print('Bad Parameter: Alpha')", "def checkParameters(self):\n EDVerbose.DEBUG(\"EDPluginExecGnomv0_1.checkParameters\")\n self.checkMandatoryParameters(self.dataInput, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.experimentalDataQ, \"Scattering vector values are missing\")\n self.checkMandatoryParameters(self.dataInput.experimentalDataValues, \"Experimental intensity values are missing\")", "def _check_params(self):\n if self.n_estimators <= 0:\n raise ValueError(\"n_estimators must be greater than 0 but \"\n \"was %r\" % self.n_estimators)\n\n if self.learning_rate <= 0.0:\n raise ValueError(\"learning_rate must be greater than 0 but \"\n \"was %r\" % self.learning_rate)\n\n if (self.loss not in self._SUPPORTED_LOSS\n or self.loss not in LOSS_FUNCTIONS):\n raise ValueError(\"Loss '{0:s}' not supported. 
\".format(self.loss))\n\n if self.loss == 'deviance':\n loss_class = (MultinomialDeviance\n if len(self.classes_) > 2\n else BinomialDeviance)\n else:\n loss_class = LOSS_FUNCTIONS[self.loss]\n\n if self.loss in ('huber', 'quantile'):\n self.loss_ = loss_class(self.n_classes_, self.alpha)\n else:\n self.loss_ = loss_class(self.n_classes_)\n\n if not (0.0 < self.subsample <= 1.0):\n raise ValueError(\"subsample must be in (0,1] but \"\n \"was %r\" % self.subsample)\n\n if self.init is not None:\n if isinstance(self.init, six.string_types):\n if self.init not in INIT_ESTIMATORS:\n raise ValueError('init=\"%s\" is not supported' % self.init)\n else:\n if (not hasattr(self.init, 'fit')\n or not hasattr(self.init, 'predict')):\n raise ValueError(\"init=%r must be valid BaseEstimator \"\n \"and support both fit and \"\n \"predict\" % self.init)\n\n if not (0.0 < self.alpha < 1.0):\n raise ValueError(\"alpha must be in (0.0, 1.0) but \"\n \"was %r\" % self.alpha)\n\n if isinstance(self.max_features, six.string_types):\n if self.max_features == \"auto\":\n # if is_classification\n if self.n_classes_ > 1:\n max_features = max(1, int(np.sqrt(self.n_features_)))\n else:\n # is regression\n max_features = self.n_features_\n elif self.max_features == \"sqrt\":\n max_features = max(1, int(np.sqrt(self.n_features_)))\n elif self.max_features == \"log2\":\n max_features = max(1, int(np.log2(self.n_features_)))\n else:\n raise ValueError(\"Invalid value for max_features: %r. \"\n \"Allowed string values are 'auto', 'sqrt' \"\n \"or 'log2'.\" % self.max_features)\n elif self.max_features is None:\n max_features = self.n_features_\n elif isinstance(self.max_features, (numbers.Integral, np.integer)):\n max_features = self.max_features\n else: # float\n if 0. < self.max_features <= 1.:\n max_features = max(int(self.max_features *\n self.n_features_), 1)\n else:\n raise ValueError(\"max_features must be in (0, n_features]\")\n\n self.max_features_ = max_features", "def checkParameters(self):\n self.DEBUG(\"EDPluginExecDatGnomv1_0.checkParameters\")\n self.checkMandatoryParameters(self.dataInput, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.inputCurve, \"No input Curve file provided\")", "def __validate__(self):\n if self.train:\n assert self.random is not None", "def check_params(self):\n raise NotImplementedError", "def checkParameters(self):\n self.DEBUG(\"EDPluginControlStitchImagev1_0.checkParameters\")\n self.checkMandatoryParameters(self.getDataInput(), \"Data Input is None\")\n self.checkMandatoryParameters(self.getDataInput().getInputImages(), \"No input Images\")", "def Check(self, parameters):", "def checkParameters(self):\n self.DEBUG(\"EDPluginWaitMultiFile.checkParameters\")\n self.checkMandatoryParameters(self.dataInput, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.expectedFile, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.expectedSize, \"Data Input is None\")", "def _check_model_params(self):\n enc_type = self.configs['encoder_type']\n if enc_type not in ['uni', 'bi', 'gnmt']:\n raise ValueError(\"encoder type must be one of ['uni', 'bi', 'gnmt'].\")\n\n attn = self.configs['attention']\n if attn not in ['', 'luong', 'scaled_luong', 'bahdanau', 'normed_bahdanau']:\n raise ValueError(\n \"attention must be one of \"\n \"['', 'luong', 'scaled_luong', 'bahdanau', 'normed_bahdanau'].\")\n\n num_enc_layers = self.configs['num_encoder_layers']\n num_dec_layers = self.configs['num_decoder_layers']\n if not num_enc_layers or 
not num_dec_layers:\n raise ValueError(\n \"num_encoder_layers and num_decoder_layers must be greater than 0.\")\n if num_enc_layers != num_dec_layers:\n self.configs['pass_hidden_state'] = False\n\n if enc_type == \"bi\" and num_enc_layers % 2 != 0:\n raise ValueError(\n \"num_encoder_layers must be even when encoder_type is %s.\" % enc_type)\n\n attn_arch = self.configs.get('attention_architecture', None)\n if attn_arch in [\"gnmt\"] and num_enc_layers < 2:\n raise ValueError(\"For gnmt attention architecture, \"\n \"num_encoder_layers: %d should be >= 2.\" %\n num_enc_layers)\n\n infer_mode = self.configs['infer_mode']\n beam_width = self.configs.get(\"beam_width\", 0)\n if infer_mode == \"beam_search\" and beam_width <= 0:\n raise ValueError(\"beam_width must be > 0 if infer_mode is `beam_search`.\")\n\n sample_temp = self.configs.get(\"sampling_temperature\", 0.0)\n if infer_mode == \"sample\" and sample_temp <= 0.0:\n raise ValueError(\n \"sampling_temperature must greater than 0.0 using sample decode.\")\n\n subword_option = self.configs['subword_option']\n if subword_option not in ['', 'bpe', 'spm']:\n raise ValueError(\"subword_option must be one of ['','bpe','spm']\")\n\n num_enc_residual_layers = 0\n num_dec_residual_layers = 0\n if self.configs['residual']:\n if num_enc_layers > 1:\n num_enc_residual_layers = num_enc_layers - 1\n if num_dec_layers > 1:\n num_dec_residual_layers = num_dec_layers - 1\n\n if enc_type == \"gnmt\":\n num_enc_residual_layers = num_enc_layers - 2\n if num_enc_layers == num_dec_layers:\n num_dec_residual_layers = num_enc_residual_layers\n\n self.configs['num_encoder_residual_layers'] = num_enc_residual_layers\n self.configs['num_decoder_residual_layers'] = num_dec_residual_layers", "def checkParameters(self):\n self.DEBUG(\"EDPluginExecDatcmpv2_0.checkParameters\")\n self.checkMandatoryParameters(self.getDataInput(), \"Data Input is None\")\n self.checkMandatoryParameters(self.getDataInput().inputCurve, \"No input 1D curves file provided\")", "def _check_parameter(self, data):\n return self._pre_process_record(data) is not None", "def _validate_setup(self, skip=None):\n\n if skip is None:\n skip = {}\n\n required_attributes = {\"optimizer\"}.difference(skip)\n\n for attr in required_attributes:\n if getattr(self, attr, None) is None:\n raise ValueError(f\"The {attr} cannot be None.\")\n\n if self.num_timesteps is not None and self.num_timesteps <= 0:\n raise ValueError(\n f\"The number of timesteps must be positive but is {self.num_timesteps}.\"\n )\n\n if self.ansatz.num_parameters == 0:\n raise QiskitError(\n \"The ansatz cannot have 0 parameters, otherwise it cannot be trained.\"\n )\n\n if len(self.initial_parameters) != self.ansatz.num_parameters:\n raise QiskitError(\n f\"Mismatching number of parameters in the ansatz ({self.ansatz.num_parameters}) \"\n f\"and the initial parameters ({len(self.initial_parameters)}).\"\n )", "def check(self) -> None:\n # check existence\n self.check_key_exists()\n\n # check valid range and type\n assert 0 <= self.config[\"MOMENTUM\"] <= 1\n assert isinstance(self.config[\"MOMENTUM\"], float)\n\n assert self.config[\"WEIGHT_DECAY\"] >= 0\n assert isinstance(self.config[\"WEIGHT_DECAY\"], float)\n\n assert self.config[\"SEED\"] >= 0\n assert isinstance(self.config[\"SEED\"], int)\n\n assert self.config[\"BATCH_SIZE\"] > 0\n assert isinstance(self.config[\"BATCH_SIZE\"], int)\n\n assert self.config[\"EPOCHS\"] > 0\n assert isinstance(self.config[\"EPOCHS\"], int)\n\n assert self.config[\"LR\"] > 0\n assert 
isinstance(self.config[\"LR\"], float)\n\n if \"NESTEROV\" in self.config:\n assert type(self.config[\"NESTEROV\"]) is bool\n else:\n self.config[\"NESTEROV\"] = False # default\n\n if \"CUTMIX\" in self.config:\n cutmix_config = self.config[\"CUTMIX\"]\n assert \"beta\" in cutmix_config\n assert cutmix_config[\"beta\"] > 0\n assert \"prob\" in cutmix_config\n assert 0 < cutmix_config[\"prob\"] <= 1\n\n if \"AUG_TRAIN_PARAMS\" in self.config:\n assert isinstance(self.config[\"AUG_TRAIN_PARAMS\"], dict)\n else:\n self.config[\"AUG_TRAIN_PARAMS\"] = dict()\n\n if \"AUG_TEST_PARAMS\" in self.config:\n assert isinstance(self.config[\"AUG_TEST_PARAMS\"], dict)\n else:\n self.config[\"AUG_TEST_PARAMS\"] = dict()\n\n self.check_criterion()\n self.check_lr_schedulers()\n self.check_regularizer()", "def checkParameters(self):\n EDVerbose.DEBUG(\"EDPluginExecVideov10.checkParameters\")\n self.checkMandatoryParameters(self.getDataInput(), \"Data Input is None\")\n self.checkMandatoryParameters(self.getDataInput().getInputImagePath(), \"inputImage list is None\")\n for oneXSDataFile in self.getDataInput().getInputImagePath():\n self.checkMandatoryParameters(oneXSDataFile.getPath().getValue(), \"input Image does not exist\" + oneXSDataFile.marshal())", "def _further_validate_and_setup(self) -> None:\n\n # Make sure parameters make sense/are valid\n if len(self.validated['learners']) != len(self.validated['param_grids']):\n raise SchemaError(autos=None,\n errors='The lists of of learners and parameter '\n 'grids must be the same size.')\n if (self.validated['hashed_features'] is not None\n and self.validated['hashed_features'] == 0):\n self.validated['hashed_features'] = self._n_features_feature_hashing\n if self.validated['lognormal'] and self.validated['power_transform']:\n raise SchemaError(autos=None,\n errors='Both \"lognormal\" and \"power_transform\" '\n 'were set simultaneously.')\n if len(self.validated['learners']) != len(self.validated['param_grids']):\n raise SchemaError(autos=None,\n errors='The \"learners\" and \"param_grids\" '\n 'parameters were both set and the '\n 'lengths of the lists are unequal.')", "def check_parameters(args):\n inference_algorithm = args[\"inference_algorithm\"]\n combination_algorithm = args[\"combination_algorithm\"]\n measures = args[\"measures\"]\n prior = args[\"prior\"]\n inverse_dynamics_model_checkpoint = args[\"inverse_dynamics_model_checkpoint\"]\n\n check_in(\n \"inference_algorithm\",\n inference_algorithm,\n [\n \"rlsp\",\n \"latent_rlsp\",\n \"latent_rlsp_ablation\",\n \"sampling\",\n \"deviation\",\n \"reachability\",\n \"spec\",\n ],\n )\n check_in(\n \"combination_algorithm\",\n combination_algorithm,\n (\"additive\", \"bayesian\", \"latent_vi\", \"latent_ppo\"),\n )\n check_in(\"prior\", prior, [\"gaussian\", \"laplace\", \"uniform\"])\n\n for i, measure in enumerate(measures):\n check_in(\n \"measure {}\".format(i),\n measure,\n [\"inferred_reward\", \"true_reward\", \"final_reward\", \"model_training_error\"],\n )\n\n if combination_algorithm == \"bayesian\":\n check_in(\"inference_algorithm\", inference_algorithm, [\"rlsp\", \"sampling\"])\n\n if inference_algorithm == \"latent_rlsp\":\n check_not_none(\n \"inverse_dynamics_model_checkpoint\", inverse_dynamics_model_checkpoint\n )\n\n if (\n combination_algorithm.startswith(\"latent\")\n and inference_algorithm != \"latent_rlsp\"\n ):\n raise ValueError(\n \"combination_algorithm 'latent' should only be used with 'latent_rlsp'\"\n )", "def check_additional_input():\r\n\r\n # Check if the 
cluster center input is correct\r\n RM.check_if_matrix(clust_cent, 'The cluster centers')\r\n RM.warn_if_bigger(clust_cent.shape[1], meta_model.get_in_par_means().shape[1],\r\n 'The number of input parameters in the cluster centers',\r\n 'the number of input parameters - 1')\r\n RM.check_if_bigger(clust_cent.shape[1], meta_model.get_in_par_means().shape[1] - 1,\r\n 'The number of input parameters',\r\n 'the number of input parameters in the cluster centers')\r\n\r\n bounds = meta_model.get_in_par_intervals()\r\n\r\n for j in range(clust_cent.shape[0]):\r\n for i in range(bounds.shape[0]):\r\n RM.check_if_in_interval(bounds[i], clust_cent[j, i], i, ' In cluster center %x, the value')\r\n\r\n def check_PLSR_input():\r\n \"\"\" Checks model data of PLSR\r\n\r\n :return: Checks model data of PLSR\r\n \"\"\"\r\n\r\n RM.check_if_ndim_array(model_data, 3, 'Model data')\r\n RM.check_if_bigger(model_data.shape[1], meta_model.get_in_par_means().shape[1],\r\n 'The number of input parameters in the solution matrix',\r\n 'the number of input parameters')\r\n RM.warn_if_bigger(model_data.shape[1], meta_model.get_in_par_means().shape[1] + 1,\r\n 'The number of input parameters',\r\n 'the number of input parameters in the solution matrix')\r\n RM.check_if_bigger(model_data.shape[2], meta_model.get_out_par_means().shape[1] - 1,\r\n 'The number of output parameters in the solution matrix',\r\n 'the number of output parameters')\r\n RM.warn_if_bigger(model_data.shape[2], meta_model.get_out_par_means().shape[1],\r\n 'The number of output parameters',\r\n 'the number of output parameters in the solution matrix')\r\n\r\n # Check if the additional data is correct\r\n\r\n if meta_model.get_type() == 'PLSR': # Additional check-up for PLSR\r\n check_PLSR_input()\r\n\r\n elif meta_model.get_type() == 'DLU': # Additional check-up for DLU\r\n raise TypeError('This part is not implemented yet')\r\n\r\n # if not isinstance(model_data, np.ndarray):\r\n # raise TypeError('The cluster input and output data is not stored in a multidimensional array')\r\n #\r\n # for clust_data in model_data:\r\n #\r\n # if not isinstance(clust_data[0], np.matrix) or not isinstance(clust_data[1], np.matrix):\r\n # raise TypeError('One of the input or output databases is not a matrix')\r\n #\r\n # if clust_data[0].shape[1] > meta_model.get_in_par_means().shape[1]:\r\n # warnings.warn('The number of input parameters for the input database of the clusters is bigger '\r\n # 'than the actual number of input parameters')\r\n #\r\n # elif clust_data[0].shape[1] < meta_model.get_in_par_means().shape[1]:\r\n # raise TypeError('The number of input parameters for the input database of the clusters is '\r\n # 'smaller than the actual numbers of input parameters')\r\n #\r\n # if clust_data[1].shape[1] > meta_model.get_out_par_means().shape[1]:\r\n # raise TypeError('The number of output parameters for the output database of the clusters is '\r\n # 'bigger than the actual number of output parameters')\r\n #\r\n # elif clust_data[1].shape[1] < meta_model.get_out_par_means().shape[1]:\r\n # raise TypeError('The number of output parameters for the output database of the clusters is '\r\n # 'smaller than the actual numbers of output parameters')\r\n #\r\n # if clust_data[0].shape[0] != clust_data[1].shape[0]:\r\n # raise TypeError('The number rows in the input and output database differ from each other')\r\n\r\n else: # No check-up is done when the meta-model is an unknown version\r\n warnings.warn('The additional cluster data can not be checked, for this 
kind of meta-model')\r\n\r\n RM.check_if_same_size(clust_cent.shape[0], model_data.shape[0],\r\n 'The number of clusters according to the cluster centers',\r\n 'The number of clusters according to the model_data')", "def _validate(self):\n for p in self.parameters:\n #Check for missing required parameters:\n if p.is_required and not(p.is_set):\n raise ValueError(\"Parameter %s is not set.\" \\\n % p.names[-1])\n #Also repeat the parameter validation here, just in case?", "def _check_required_params(self):\n logging.debug('.. check if Experiment have all required parameters')\n for n in self.REQUIRED_PARAMS:\n if n not in self.params:\n raise ValueError('missing \"%s\" among %r' % (n, self.params.keys()))", "def _validate_parameters(self, epochs, log_interval):\n\n if not epochs > 0:\n msg = (\n \"The number of training epochs = {} should be strictly\"\n \" positive.\"\n )\n self.logger.error(msg.format(epochs))\n raise ValueError(msg.format(epochs))\n\n if not log_interval > 0:\n msg = (\n \"The number of batches to wait before printting the\"\n \" training status should be strictly positive, but got {}\"\n \" instead.\"\n )\n self.logger.error(msg.format(log_interval))\n raise ValueError(msg.format(log_interval))\n\n if not 0 < self.shrinkage_rate <= 1:\n msg = (\n \"The shrinkage rate should be in the range (0, 1], but got\"\n \" {} instead.\"\n )\n self.logger.error(msg.format(self.shrinkage_rate))\n raise ValueError(msg.format(self.shrinkage_rate))", "def _check_log_params(self):\n steps_per_stats = self.configs['steps_per_stats']\n if not steps_per_stats or steps_per_stats < 0:\n steps_per_stats = 100\n steps_per_eval = self.configs['steps_per_eval']\n if not steps_per_eval:\n steps_per_eval = 10 * steps_per_stats\n steps_per_external_eval = self.configs['steps_per_external_eval']\n if not steps_per_external_eval:\n steps_per_external_eval = 5 * steps_per_eval\n self.configs['steps_per_stats'] = steps_per_stats\n self.configs['steps_per_eval'] = steps_per_eval\n self.configs['steps_per_external_eval'] = steps_per_external_eval", "def _check_inputs(self):\n\n # Check if attributes exists\n if self.attributes is None:\n print(\"attributes is missing; call set_attributes(new_attributes) to fix this! new_attributes should be a\",\n \"populated dataset of independent variables.\")\n return False\n\n # Check if labels exists\n if self.labels is None:\n print(\"labels is missing; call set_labels(new_labels) to fix this! new_labels should be a populated dataset\",\n \"of dependent variables.\")\n return False\n\n # Check if attributes and labels have same number of rows (samples)\n if self.attributes.shape[0] != self.labels.shape[0]:\n print(\"attributes and labels don't have the same number of rows. 
Make sure the number of samples in each\",\n \"dataset matches!\")\n return False\n\n # Type-checking for fit_intercept, normalize, and copy_X isn't needed; these can accept truthy/falsy values\n\n # Check if n_jobs is an integer or None\n if self.n_jobs is not None and not isinstance(self.n_jobs, int):\n print(\"n_jobs must be None or an integer; call set_n_jobs(new_n_jobs) to fix this!\")\n return False\n\n # Check if test_size is a float or None\n if self.test_size is not None and not isinstance(self.test_size, (int, float)):\n print(\"test_size must be None or a number; call set_test_size(new_test_size) to fix this!\")\n return False\n\n return True", "def validate_parameters(self):\n\n flag = True\n warnings = \"\"\n # Check radius\n r = self.parameters.get('r', 0)\n if type(r) not in [int, float]:\n flag = False\n warnings += \"Radius r must be a float value\\n\"\n else:\n if r <= 0:\n flag = False\n warnings += \"Radius r must be higher than 0\\n\"\n # Check if is full penetrating\n op = self.parameters.get('full', False)\n\n if not op:\n # Check observation well length\n if 'd' in self.parameters and 'l' in self.parameters:\n d = self.parameters.get('d', -1)\n l = self.parameters.get('l', -1)\n if type(l) not in [int, float]:\n flag = False\n warnings += \"Depth of well bottom must be a float value\\n\"\n else:\n if l < 0:\n flag = False\n warnings += \"Depth l must be higher than 0\\n\"\n if type(d) not in [int, float]:\n flag = False\n warnings += \"Depth of well screen must be a float value\\n\"\n else:\n if d < 0 or d > l:\n flag = False\n warnings += \"Depth d must be in range 0 <= d <= l\\n\"\n # Check piezometer depth\n elif 'z' in self.parameters:\n z = self.parameters.get('z', -1)\n if type(z) not in [int, float]:\n flag = False\n warnings += \"Depth of piezometer must be a float value\\n\"\n else:\n if z < 0:\n flag = False\n warnings += \"Depth z must be higher than 0\\n\"\n else:\n flag = False\n warnings += \"Well don't contain well depth attributes\\n\"\n return(flag, warnings) # End Function", "def validate_parameters(self):\n\n flag = True\n warnings = \"\"\n # Check radius\n r = self.parameters.get('rw', 0)\n if type(r) not in [int, float]:\n flag = False\n warnings += \"Well radius rw must be a float value\\n\"\n else:\n if r <= 0:\n flag = False\n warnings += \"Well radius rw must be higher than 0\\n\"\n # Check if is full penetrating\n op = self.parameters.get('full', False)\n\n if not op:\n # Check observation well length\n if 'd' in self.parameters and 'l' in self.parameters:\n d = self.parameters.get('d', -1)\n l = self.parameters.get('l', -1)\n if type(l) not in [int, float]:\n flag = False\n warnings += \"Depth of well bottom must be a float value\\n\"\n else:\n if l < 0:\n flag = False\n warnings += \"Depth l must be higher than 0\\n\"\n if type(d) not in [int, float]:\n flag = False\n warnings += \"Depth of well screen must be a float value\\n\"\n else:\n if d < 0 or d > l:\n flag = False\n warnings += \"Depth d must be in range 0 <= d <= l\\n\"\n return(flag, warnings) # End Function", "def _check_params(self):\n\t\tstrange_param_helper = False\n\t\tfor param in self.params:\n\t\t\n\t\t\t# It could be that the param encapsulates several values (e.g., \"FLUX_RADIUS(10)\")\n\t\t\t# So we have to dissect this\n\t\t\tmatch = re.compile(\"(\\w*)\\(\\d*\\)\").match(param)\n\t\t\tif match:\n\t\t\t\tcleanparam = match.group(1)\n\t\t\telse:\n\t\t\t\tcleanparam = param\n\t\t\t\t\n\t\t\tif cleanparam not in self.fullparamlist:\n\t\t\t\tlogger.warning(\"Parameter 
'%s' seems strange and might be unknown to SExtractor\" \\\n % (param))\n\t\t\t\tstrange_param_helper = True\n\t\t\t\t\n\t\tif strange_param_helper:\n\t\t\tlogger.warning(\"Known parameters are: %s\" % (self.fullparamtxt))", "def _validate_params(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def _training_before_hook(self):\n pass", "def check_params(self, params):\n legal_params_fns = [\n Sequential.fit, Sequential.predict, Sequential.predict_classes,\n Sequential.evaluate\n ]\n if self.build_fn is None:\n legal_params_fns.append(self.__call__)\n elif (not isinstance(self.build_fn, types.FunctionType) and\n not isinstance(self.build_fn, types.MethodType)):\n legal_params_fns.append(self.build_fn.__call__)\n else:\n legal_params_fns.append(self.build_fn)\n\n legal_params = []\n for fn in legal_params_fns:\n legal_params += tf_inspect.getargspec(fn)[0]\n legal_params = set(legal_params)\n\n for params_name in params:\n if params_name not in legal_params:\n if params_name != 'nb_epoch':\n raise ValueError('{} is not a legal parameter'.format(params_name))", "def validate_parameters(self):\n\n # env and fixed_env\n self._validate_envs()\n # checking optional data and scripts\n self._validate_download_data()\n self.data_path = self.params[\"data\"][\"location\"]\n self._validate_scripts()\n # checking optional data_ref (if not data_ref provided, path is the same as data path)\n if \"data_ref\" in self.params:\n self._validate_download_data(data_nm=\"data_ref\")\n# self.data_ref_path = self.params[\"data_ref\"][\"location\"]\n# else:\n# self.data_ref_path = self.data_path\n # checking analysis\n self._validate_analysis()\n # checking tests\n self._validate_tests()\n\n self.params.setdefault(\"post_build\", None)\n # if copy in post_build part that I'm changing the build_context\n if self.params[\"post_build\"] and \"copy\" in self.params[\"post_build\"]:\n self.build_context = self.workflow_path\n else:\n self.build_context = self.working_dir\n\n self.params.setdefault(\"plots\", [])\n if self.params[\"plots\"]:\n if not isinstance(self.params[\"plots\"], (list, tuple)):\n raise SpecificationError(\n \"Value of key 'plots' must be a list or a tuple\"\n )\n else:\n if any(not isinstance(j, dict) for j in self.params[\"plots\"]):\n raise SpecificationError(\n \"Every item in 'plots' must be a dictionary.\"\n )", "def checkNeededParams(self):\n for clp,value in self.neededParamsNames.items():\n if value[0] not in self.neededParams:\n print >> sys.stderr, clp+\" is a mandatory parameter \"\n self.printUsage()\n sys.exit(1)", "def check_params(self, X: TwoDimArray, y: OneDimArray = None) -> None:\n\n super().check_params(X)", "def check_params(self, X: TwoDimArray, y: OneDimArray = None) -> None:\n\n super().check_params(X)", "def check_params(self, X: TwoDimArray, y: OneDimArray = None) -> None:\n\n super().check_params(X)", "def check(self):\n if 'MISFIT' not in PAR:\n setattr(PAR, 'MISFIT', 'Waveform')\n\n if 'CHANNELS' not in PAR:\n raise ParameterError(PAR, 'CHANNELS')\n\n if 'READER' not in PAR:\n raise ParameterError(PAR, 'READER')\n\n if 'WRITER' not in PAR:\n setattr(PAR, 'WRITER', PAR.READER)\n\n if 'NORMALIZE' not in PAR:\n setattr(PAR, 'NORMALIZE', True)\n\n # mute settings\n if 'MUTE' not in PAR:\n setattr(PAR, 'MUTE', False)\n\n if 'MUTESLOPE' not in PAR:\n setattr(PAR, 'MUTESLOPE', 0.)\n\n if 'MUTECONST' not in PAR:\n setattr(PAR, 'MUTECONST', 0.)\n\n # filter settings\n if 'BANDPASS' not in PAR:\n setattr(PAR, 'BANDPASS', False)\n\n if 'FREQLO' not in 
PAR:\n setattr(PAR, 'FREQLO', 0.)\n\n if 'FREQHI' not in PAR:\n setattr(PAR, 'FREQHI', 0.)\n\n # assertions\n if PAR.READER not in dir(readers):\n print msg.ReaderError\n raise ParameterError()\n\n if PAR.WRITER not in dir(writers):\n print msg.WriterError\n raise ParameterError()", "def _verify_fit(self) -> None:\n if not hasattr(self, 'X_train') or not hasattr(self, 'Y_train'):\n raise ValueError('Training data not set. Call `fit` and pass training data first.')", "def before_fit(self):\n self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, \"gather_preds\")\n if not self.run:\n return\n\n # Prepare ground truth container, set here as y_true's always stay the same\n self.y_true = []", "def validate_input_params(self):\n if isinstance(self.parameters, dict):\n # Setup the mandatory params for snowflake load\n mandatory_keys = ('load_type', 'hive_database', 'hive_table', 'sfSchema', 'sfTable', 'sfGrantee_roles')\n if not all(key in self.parameters for key in mandatory_keys):\n logging.info(\"Mandatory keys for GenieSnowflakeOperator(parameters): %s\\n\" % format(mandatory_keys))\n logging.error(\"Mandatory key(s) NOT exists in GenieSnowflakeOperator(parameters): %s\\n\" % format(self.parameters))\n raise Exception(\"Job failed\")\n\n # Setting up pre,post and grants scripts for snowflake\n self.sfPresteps_sql = self.parameters.get('sfPresteps_sql', self.sfPresteps_sql)\n self.sfPoststeps_sql = self.parameters.get('sfPoststeps_sql', self.sfPoststeps_sql)\n self.sfPostgrants_sql = self.parameters.get('sfPostgrants_sql', self.sfPostgrants_sql)\n else:\n logging.error(\"Input is NOT a dictionary: %s\\n\" % format(self.parameters))\n raise Exception(\"Job failed\")", "def check_config(params, dannce_net, prediction):\n check_camnames(params)\n\n if params[\"exp\"] is not None:\n for expdict in params[\"exp\"]:\n check_camnames(expdict)\n\n if dannce_net:\n check_net_expval(params)\n check_vmin_vmax(params)", "def test_training(self):\n\t\tpass", "def input_check(self):\n\n if self.species == 'He': assert self.line_model == 'voigt'\n n_upper_range, e_dens_range, temp_range, b_field_range = get_param_ranges(self.line_model)\n\n if np.isnan(n_upper_range).sum() <= 1:\n assert (self.n_upper in range(n_upper_range[0], n_upper_range[1]))\n if np.isnan(e_dens_range).sum() <= 1:\n assert (e_dens_range[0] <= self.e_dens <= e_dens_range[1])\n if np.isnan(temp_range).sum() <= 1:\n assert (temp_range[0] <= self.temp <= temp_range[1])\n if np.isnan(b_field_range).sum() <= 1:\n assert (b_field_range[0] <= self.b_field <= b_field_range[1])", "def check_data(self):\n\n missing_params = {}\n flag = False\n\n missing_params['general'] = {}\n for name, param in self.params.items():\n if not param.check():\n missing_params['general'][name] = param.get_description()\n flag = True\n\n for component, comp_obj in self.components.items():\n missing_params[component], flag_comp = comp_obj.check_data()\n\n # Assign empty component parameters that have a general version:\n empty_general_params = set(missing_params[component]).intersection(\n set(self.params))\n for param in empty_general_params:\n comp_obj.change_param_object(param, self.params[param])\n del missing_params[component][param]\n\n if missing_params[component]:\n flag = True\n\n if flag:\n raise Exception('Following parameters are missing:\\n{}'\n .format(\n self._print_params(missing_params, disp=False)))\n\n return True", "def checkDataParameters(self):\n while True:\n overideSamplesFlag = input(\"Default data parameters are:-\\n\\n Number 
of samples = %d\\n Number of replicates = %d\\n Number of QC replicates = %d\\n Number of solvent replicates = %d\\n File polarity mode = %s\\n Column type = %s\\n\\nChange these sample parameters (Y or N)? [N]: \" %\n (self.numberOfSamples, self.numberOfTechReps, self.numberOfQCReps, self.numberOfSolventReps, self.filePolarityMode, self.columnType)) or 'N'\n if overideSamplesFlag.upper() == 'N':\n break\n elif overideSamplesFlag.upper() == 'Y':\n self.setNumSamples()\n self.__setNumberOfTechReps__()\n self.__setNumberOfQCReps__()\n self.__setNumberOfSolventReps__()\n self.__setFilePolarityMode__()\n self.__setColumnType__()\n break\n else:\n print(\"\\nY or N required!\")", "def check_params(params):\n if 'shuffle_seed' not in params:\n params['shuffle_seed'] = None\n\n if trainers_num > 1 and params['shuffle_seed'] is None:\n raise ShuffleSeedException()\n\n data_dir = params.get('data_dir', '')\n assert os.path.isdir(data_dir), \\\n \"{} doesn't exist, please check datadir path\".format(data_dir)\n\n if params['mode'] != 'test':\n file_list = params.get('file_list', '')\n assert os.path.isfile(file_list), \\\n \"{} doesn't exist, please check file list path\".format(file_list)", "def checkParameters(self):\n EDVerbose.DEBUG(\"EDPluginControlAbsorptionv0_1.checkParameters\")\n self.checkMandatoryParameters(self.getDataInput(), \"Data Input is None\")", "def check_required_parameters(required_params_dict=dict()):\r\n print threading.currentThread().getName(), 'Starting'\r\n is_valid = True\r\n required_params_not_set = pythontools.validate_required_parameters(required_params_dict)\r\n if len(required_params_not_set) > 0:\r\n is_valid = False\r\n msg = \"Validate all required input parameters are set failed.\"\r\n for param in required_params_not_set:\r\n steplog.error(\"Required parameter %s is not set.\" % param)\r\n else:\r\n msg = \"Validate all required input parameters are set succeeded.\"\r\n return is_valid, msg", "def _validate_parameters(self):\n super()._validate_parameters()\n\n #################### callbacks ####################\n self.callbacks = check_callback(self.callbacks)", "def checkParameters(self):\n self.DEBUG(\"EDPluginExecDamstartv0_3.checkParameters\")\n self.checkMandatoryParameters(self.dataInput, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.getInputPdbFile(), \"No template file specified\")", "def _check_parameters(self, target_function, **kwargs):\n # Ensure all arguments are =< 0 where relevant\n for keyword, value in kwargs.items():\n # Two conditions\n value_is_less_than_zero = value < 0\n keyword_is_relevant = keyword in ['mean', 'constant', 'low', 'mode', 'high']\n # Test conditions\n if keyword_is_relevant and value_is_less_than_zero:\n raise FairException('\"{}\" is less than zero.'.format(keyword))\n # Check that all required keywords are provided\n required_keywords = self._required_keywords[target_function]\n for required_keyword in required_keywords:\n if required_keyword in kwargs.keys():\n pass\n else:\n raise FairException('\"{}\" is missing \"{}\".'.format(str(target_function), required_keyword))", "def check_param(self):\n if scipy.ndim(self.param['initial_heading'].shape) > 1:\n raise(ValueError, 'initial_heading must have ndim=1')\n\n equal_shape_list = ['x_start_position','y_start_position','flight_speed','release_time']\n for item in equal_shape_list:\n if self.param[item].shape != self.param['initial_heading'].shape:\n raise(ValueError, '{0}.shape must equal initial_heading.shape'.format(item))", "def 
check_all_user_inputs_valid(self):\n self.check_RNN_layers_valid()\n self.check_activations_valid()\n self.check_embedding_dimensions_valid()\n self.check_initialiser_valid()\n self.check_y_range_values_valid()\n self.check_return_final_seq_only_valid()", "def check_params(params):\n assert 'split' in params.keys(\n ), 'Params must include split (train, val, or test).'\n\n required = ['batch_size', 'root', 'im_shape']\n for r in required:\n assert r in params.keys(), 'Params must include {}'.format(r)", "def _check_training(\n self, model, x_train, y_train, loss_name, num_epochs=2, learning_rate=0.001\n ):\n # create loss function\n loss = getattr(crypten.nn, loss_name)()\n\n for i in range(num_epochs):\n output = model(x_train)\n loss_value = loss(output, y_train)\n\n # set gradients to \"zero\"\n model.zero_grad()\n for param in model.parameters():\n self.assertIsNone(param.grad, \"zero_grad did not reset gradients\")\n\n # perform backward pass\n loss_value.backward()\n for param in model.parameters():\n if param.requires_grad:\n self.assertIsNotNone(\n param.grad, \"required parameter gradient not created\"\n )\n\n # update parameters\n orig_parameters, upd_parameters = {}, {}\n orig_parameters = self._compute_reference_parameters(\n \"\", orig_parameters, model, 0\n )\n model.update_parameters(learning_rate)\n upd_parameters = self._compute_reference_parameters(\n \"\", upd_parameters, model, learning_rate\n )\n\n # check parameter update\n parameter_changed = False\n for name, value in orig_parameters.items():\n if param.requires_grad and param.grad is not None:\n unchanged = torch.allclose(upd_parameters[name], value)\n if unchanged is False:\n parameter_changed = True\n self.assertTrue(\n parameter_changed, \"no parameter changed in training step\"\n )\n\n # record initial and current loss\n if i == 0:\n orig_loss = loss_value.get_plain_text()\n curr_loss = loss_value.get_plain_text()\n\n # check that the loss has decreased after training\n self.assertTrue(\n curr_loss.item() < orig_loss.item(),\n f\"{loss_name} has not decreased after training\",\n )", "def test_num_trainable_params():\n model = micronet.cifar.linear_model.create_model()\n assert test.util.count_trainable_params(model) \\\n == cifar_linear_model.NUM_TRAINABLE_PARAM\n # Just for sanity sake, so that I know the true value and if it changes:\n assert cifar_linear_model.NUM_TRAINABLE_PARAM == 172900", "def check_params(params):\n\n required = ['gtsrb_train_root', 'gtsrb_test_root', 'batch_size']\n for r in required:\n assert r in params.keys(), 'Params must include {}'.format(r)", "def check_params(params):\n assert 'split' in params.keys(\n ), 'Params must include split (train, val, or test).'\n\n required = ['batch_size', 'im_shape']\n for r in required:\n assert r in params.keys(), 'Params must include {}'.format(r)", "def _check_parameters_support(self, parameters=()):\n for parameter in parameters:\n assert parameter in self._supported, \"Estimation %s is not implemented yet\" % parameter", "def _pre_submit_check(self):\n if (\n any([task.task_type == TaskType.SWITCH for task in self.tasks.values()])\n and self.param is None\n and all([len(task.local_params) == 0 for task in self.tasks.values()])\n ):\n raise PyDSParamException(\n \"Parameter param or at least one local_param of task must \"\n \"be provider if task Switch in process definition.\"\n )", "def check_PLSR_input():\r\n\r\n RM.check_if_ndim_array(model_data, 3, 'Model data')\r\n RM.check_if_bigger(model_data.shape[1], 
meta_model.get_in_par_means().shape[1],\r\n 'The number of input parameters in the solution matrix',\r\n 'the number of input parameters')\r\n RM.warn_if_bigger(model_data.shape[1], meta_model.get_in_par_means().shape[1] + 1,\r\n 'The number of input parameters',\r\n 'the number of input parameters in the solution matrix')\r\n RM.check_if_bigger(model_data.shape[2], meta_model.get_out_par_means().shape[1] - 1,\r\n 'The number of output parameters in the solution matrix',\r\n 'the number of output parameters')\r\n RM.warn_if_bigger(model_data.shape[2], meta_model.get_out_par_means().shape[1],\r\n 'The number of output parameters',\r\n 'the number of output parameters in the solution matrix')\r\n\r\n # Check if the additional data is correct\r", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.train_name_path))\n if not osp.exists(self.test_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.test_name_path))\n if not osp.exists(self.track_train_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_train_info_path))\n if not osp.exists(self.track_test_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_test_info_path))\n if not osp.exists(self.query_IDX_path):\n raise RuntimeError(\"'{}' is not available\".format(self.query_IDX_path))", "def check(self):\r\n self.check_probabilities()\r\n self.check_sum()", "def _evaluate_model_parameters(self, session):\n logger.info('There are no model specific operation evaluation!')", "def _check_is_fitted(self):\n check_is_fitted(self, ['w', 'b'])", "def test_checkParameters(self):\n self.failUnlessEqual(self.nice.opts['long'], \"Alpha\")\n self.failUnlessEqual(self.nice.opts['another'], \"Beta\")\n self.failUnlessEqual(self.nice.opts['longonly'], \"noshort\")\n self.failUnlessEqual(self.nice.opts['shortless'], \"Gamma\")", "def _pre_fit(self):\n pass", "def check_params(self, model_params):\n return model_params", "def checkParameters(self):\n EDVerbose.DEBUG(\"EDPluginExecThumbnailv10.checkParameters\")\n self.checkMandatoryParameters(self.getDataInput(), \"Data Input is None\")", "def check_criterion(self) -> None:\n # get criterion class lists\n criterion_names = get_class_names_in_files(\n \"src\" + os.path.sep + \"criterions.py\"\n )\n criterion_names.remove(\"Criterion\")\n\n # Check config criterion exists\n assert self.config[\"CRITERION\"] in criterion_names\n\n # Run criterion config check\n params: Dict[str, Any] = self.config[\"CRITERION_PARAMS\"]\n\n ce_params = None\n if self.config[\"CRITERION\"] == \"HintonKLD\":\n assert \"T\" in params\n assert params[\"T\"] > 0.0\n assert isinstance(params[\"T\"], float)\n\n assert \"alpha\" in params\n assert 0.0 <= params[\"alpha\"] <= 1.0\n assert isinstance(params[\"alpha\"], float)\n\n # check additional params(teacher) exist\n assert \"teacher_model_name\" in params\n assert isinstance(\"teacher_model_name\", str)\n assert \"teacher_model_params\" in params\n\n # if HintonLoss contains crossentropy\n assert \"crossentropy_params\" in params\n ce_params = params[\"crossentropy_params\"]\n\n elif self.config[\"CRITERION\"] == \"CrossEntropy\":\n ce_params = self.config[\"CRITERION_PARAMS\"]\n\n if ce_params:\n # assert \"num_classes\" in ce_params\n # assert ce_params[\"num_classes\"] > 0\n # assert isinstance(ce_params[\"num_classes\"], 
int)\n\n assert \"num_classes\" in ce_params\n assert ce_params[\"num_classes\"] > 0\n assert isinstance(ce_params[\"num_classes\"], int)\n\n if \"label_smoothing\" not in ce_params:\n ce_params[\"label_smoothing\"] = 0.0\n else:\n assert 0.0 <= ce_params[\"label_smoothing\"] < 1.0\n assert type(ce_params[\"label_smoothing\"]) is float", "def has_learned_parameters(self) -> bool:\n return any(\n len(params) > 0 for (_, params) in self.get_optimizer_params().items()\n )", "def validate_params(self, params: Scenario) -> bool:\n valid = True\n # Make sure all needed parameters were provided\n valid = valid and \"R\" in params\n valid = valid and \"L\" in params\n\n # Make sure all parameters are physically valid\n valid = valid and params[\"R\"] > 0\n valid = valid and params[\"L\"] > 0\n\n return valid", "def check_input_args(in_arg, phase=\"train\"):\n\n if phase==\"train\": \n # Check that flowers directory exists \n if not path.isdir(in_arg.data_dir):\n print(\"For data loading: can't find directory '{}' starting from '{}'. Please check the paths and run again!\" . format(in_arg.data_dir, os.getcwd()))\n sys.exit(0)\n \n # Check that checkpoints directory exists\n if not path.isdir(in_arg.save_dir):\n print(\"For checkpoints saving: can't find directory '{}' starting from '{}'. Please check the paths and run again!\" . format(in_arg.save_dir, os.getcwd()))\n sys.exit(0) \n \n else:\n # phase == predict\n # Check that the flower name exists. Example: \"/data/flowers/test/25/image_06583.jpg\"\n if not path.isfile(in_arg.image_file):\n print(\"Image file: can't find file '{}' starting from '{}'. Please check the path, filename and run again!\" . format(in_arg.image_file, os.getcwd()))\n sys.exit(0) \n \n if not path.isfile(in_arg.checkpoint):\n print(\"Checkpoint file: can't find file '{}' starting from '{}'. Please check the path, filename and run again!\" . format(in_arg.checkpoint, os.getcwd()))\n sys.exit(0)\n \n if in_arg.category_names and not path.isfile(in_arg.category_names):\n print(\"Category names file: can't find file '{}' starting from '{}'. Please check the path, filename and run again!\" . format(in_arg.category_names, os.getcwd()))\n sys.exit(0) \n \n # All cases\n\n # Check that the architecture is supported\n if in_arg.arch not in ['alexnet', 'resnet18', 'vgg19_bn']:\n print(\"Architecture can only be: alexnet, resnet18 or vgg19_bn. Please check the architecture and run again!\")\n sys.exit(0) \n \n # Check that a valid value has been set for gpu\n if in_arg.gpu != 0 and in_arg.gpu != 1:\n print(\"GPU can only be set to 0 (disable) or 1 (enable)! 
Please check the value and run again!\")\n sys.exit(0)", "def validate_parameters(hyperparams):\n try:\n # Check Hyperparameter Type\n if not isinstance(hyperparams, dict):\n raise ValueError('Provided hyperparameter is not valid.')\n\n # Global Hyperparameter Check\n if 'global' in hyperparams:\n params = hyperparams['global']\n else:\n raise ValueError('Global parameters have not been defined.')\n\n if 'learning_rate' in params:\n if params['learning_rate'] < 0:\n raise ValueError('learning_rate={} must be strictly '\n 'positive'.format(params['learning_rate']))\n else:\n raise ValueError('learning_rate has not been defined.')\n\n if 'loss' in params:\n if params['loss'] not in VALID_LOSSES:\n raise ValueError('Loss {} is currently not supported.'\n 'Accpted losses: {}'.format(params['loss'],\n ', '.join(VALID_LOSSES)))\n else:\n raise ValueError('loss has not been defined.')\n\n if 'num_classes' in params:\n # Validate Class Parameter Types\n if type(params['num_classes']) is not int:\n raise ValueError('Provided classes value\\'s type is not valid, '\n 'should be an int value >= 2 for classification.')\n\n # Validate Classification Case\n if params['loss'] != 'least_squares':\n if params['num_classes'] < 0:\n raise ValueError('Provided class value must be >= 2 for '\n 'classification.')\n\n if params['loss'] == 'binary_crossentropy' and \\\n params['num_classes'] != 2:\n raise ValueError('Binary class models must have class of 2.')\n elif params['loss'] == 'categorical_crossentropy' and \\\n params['num_classes'] <= 2:\n raise ValueError('Multiclass models must have class > 2.')\n elif params['loss'] == 'auto':\n if params['num_classes'] < 2:\n raise ValueError('Class value must be >= 2.')\n else:\n logging.warning(\n 'Obtaining class labels based on local dataset. '\n 'This may cause failures during aggregation '\n 'when parties have distinctive class labels.')\n else:\n # Handle Classes Not Defined Case\n if params['loss'] != 'least_squares':\n raise ValueError('Classes has not been defined. Should provide '\n 'a value >= 2 for classification models.')\n\n if 'max_bins' in params:\n if not (2 <= params['max_bins'] and params['max_bins'] <= 255):\n raise ValueError('max_bins={} should be no smaller than 2 '\n 'and no larger than 255.'.format(params['max_bins']))\n\n if 'max_iter' in params:\n if params['max_iter'] < 1:\n raise ValueError('max_iter={} must not be smaller '\n 'than 1.'.format(params['max_iter']))\n else:\n raise ValueError('max_iter has not been defined.')\n\n if 'max_depth' in params:\n if params['max_depth'] is not None and params['max_depth'] <= 1:\n raise ValueError('max_depth={} must be strictly greater'\n 'than 1.'.format(params['max_leaf_nodes']))\n\n if 'max_leaf_nodes' in params:\n if params['max_leaf_nodes'] is not None and params['max_leaf_nodes'] <= 1:\n raise ValueError('max_leaf_nodes={} must be strictly greater'\n 'than 1.'.format(params['max_leaf_nodes']))\n\n if 'min_samples_leaf' in params:\n if params['min_samples_leaf'] is not None and params['min_samples_leaf'] < 0:\n raise ValueError('min_sample_leaf={} must not be smaller '\n 'than 0'.format(params['min_samples_leaf']))\n\n except Exception as ex:\n logger.exception(str(ex))\n raise HyperparamsException('Defined global hyperparameters malformed.')", "def _validate_training_process(self, sess, epoch):\n logger.info('Epoch %d: validating training process ...' % epoch)\n \n if self.val_cpu_only:\n logger.warn('The option \\'val_cpu_only\\' is enabled, but not ' + \\\n 'supported by this class. 
Option will be ignored.')\n\n val_handle = sess.run(self._val_iter.string_handle())\n sess.run(self._val_iter.initializer,\n feed_dict={self._t_val_raw_in: self._val_batch[0],\n self._t_val_raw_out: self._val_batch[1],\n self._t_val_batch_size: self._val_batch[0].shape[0]})\n\n mi_estimate, mi_real, summary = sess.run( \\\n [self._t_mi, self._t_real_mi, self._t_summaries],\n feed_dict={self._t_handle: val_handle,\n self._t_mi_known: True,})\n\n logger.info('Real MI: %f' % mi_real)\n logger.info('Estimated MI on validation batch: %f' % mi_estimate)\n\n self._val_summary_writer.add_summary(summary, epoch)\n self._val_summary_writer.flush()\n\n logger.info('Epoch %d: validating training process ... Done' % epoch)", "def _is_parameters_ok(self):\n if self.api_key is None:\n raise MissingParameterException(\"OpenWeatherMap neuron needs an api_key\")\n if self.location is None:\n raise MissingParameterException(\"OpenWeatherMap neuron needs a location\")\n\n return True", "def _is_parameters_ok(self):\n if self.api_key is None:\n raise MissingParameterException(\"OpenWeatherMap neuron needs an api_key\")\n if self.location is None:\n raise MissingParameterException(\"OpenWeatherMap neuron needs a location\")\n\n return True", "def check_training_samples(self):\n\n yidx = np.sum(self.datas[self.train_idx].gen_labels(), axis=0) < self.kfold_cv\n if np.any(yidx):\n xlist = ','.join(np.array(self.datas[self.train_idx].labels)[yidx])\n print('\\n *** WARNING ***\\n There are labels with very few samples: %s' % xlist)\n print(' If encounter chaotic errors, consider excluding these labels using --excludeloc %s\\n' % xlist)\n\n return", "def _check_inputs(self):\n\n self._check_resident_prefs()\n self._check_hospital_prefs()", "def check_invalid_args_general(config):\n # Not mathematically correct, but might be required if prior is not\n # appropriate.\n if hasattr(config, 'kl_scale') and config.kl_scale != 1.0:\n warnings.warn('Prior matching term will be scaled by %f.'\n % config.kl_scale)\n\n if hasattr(config, 'store_final_model') and \\\n hasattr(config, 'train_from_scratch') and \\\n config.store_final_model and config.train_from_scratch:\n warnings.warn('Note, when training from scratch, the final model is ' +\n 'only trained on the last task!')", "def _validate_cfg(self, ephase, cfg):\n super()._validate_cfg(ephase, cfg)\n\n if (ephase != NNModelPhase.TRAIN): return\n\n if (len(cfg.arch) != len(cfg.lp)):\n raise Exception('Layer purpose string for each layers is not' + \n ' specified. (length of `cfg.arch` != length of `cfg.lp`).')", "def precheck(self):\n if self.__quantities <= 0:\n self.logger.exception(\n '[CPU] quantities invalid: {}, should be positive'.\n format(self.__quantities))\n raise ArgsNotCorrect(\n '[CPU] quantities invalid: {}, should be positive'.\n format(self.__quantities))\n\n if self.__quantities % self.__socket != 0:\n self.logger.exception(\n '[CPU] quantities: {} is not divided by socket: {}'.\n format(self.__quantities, self.__socket))\n raise ArgsNotCorrect(\n '[CPU] quantities: {} is not divided by socket: {}'.\n format(self.__quantities, self.__socket))", "def param_vals_test(param_dict):\n file_msg = param_dict['Prog_msg']\n ##\n ## Testing if `wget` exists in the system\n if is_tool('wget'):\n pass\n else:\n msg = '{0} You need to have `wget` installed in your system to run '\n msg += 'this script. 
You can download the entire dataset at {1}.\\n\\t\\t'\n msg += 'Exiting....'\n msg = msg.format(file_msg, param_dict['url_catl'])\n raise ValueError(msg)\n ##\n ## Checking that Esmeralda is not ran when doing 'SO' halos\n if (param_dict['halotype'] == 'so') and (param_dict['sample'] == 20):\n msg = '{0} The `halotype`==`so` and `sample`==`20` are no compatible '\n msg += 'input parameters.\\n\\t\\t'\n msg += 'Exiting...'\n msg = msg.format(file_msg)\n raise ValueError(msg)\n ##\n ## Checking that `hod_model_n` is set to zero for FoF-Halos\n if (param_dict['halotype'] == 'fof') and (param_dict['hod_n'] != 0):\n msg = '{0} The `halotype`==`{1}` and `hod_n`==`{2}` are no compatible '\n msg += 'input parameters.\\n\\t\\t'\n msg += 'Exiting...'\n msg = msg.format( file_msg,\n param_dict['halotype'],\n param_dict['hod_n'])\n raise ValueError(msg)\n ##\n ## Checking input different types of `test_train_opt`\n #\n # `sample_frac`\n if (param_dict['test_train_opt'] == 'sample_frac'):\n # `sample_frac`\n if not ((param_dict['sample_frac'] > 0) and\n (param_dict['sample_frac'] <= 1.)):\n msg = '{0} `sample_frac` ({1}) must be between (0,1]'.format(\n file_msg, param_dict['sample_frac'])\n raise ValueError(msg)\n # `test_size`\n if not ((param_dict['test_size'] > 0) and\n (param_dict['test_size'] < 1)):\n msg = '{0} `test_size` ({1}) must be between (0,1)'.format(\n file_msg, param_dict['test_size'])\n raise ValueError(msg)\n #\n # boxes_n\n if (param_dict['test_train_opt'] == 'boxes_n'):\n box_n_arr = num.array(param_dict['box_idx'].split('_')).astype(int)\n box_n_diff = num.diff(box_n_arr)\n # Larger than zero\n if not (all(box_n_arr >= 0)):\n msg = '{0} All values in `box_idx` ({1}) must be larger than 0!'\n msg = msg.format(file_msg, box_n_arr)\n raise ValueError(msg)\n # Difference between elements\n if not (all(box_n_diff > 0)):\n msg = '{0} The value of `box_idx` ({1}) is not valid!'.format(\n file_msg, param_dict['box_idx'])\n raise ValueError(msg)\n #\n # `box_test`\n if (param_dict['test_train_opt'] == 'box_sample_frac'):\n # Value of `box_test`\n if not (param_dict['box_test'] >= 0):\n msg = '{0} `box_test` ({1}) must be larger or equal to `0`.'\n msg = msg.format(file_msg, param_dict['box_test'])\n raise ValueError(msg)\n # Testing `test_size`\n # `test_size`\n if not ((param_dict['test_size'] > 0) and\n (param_dict['test_size'] < 1)):\n msg = '{0} `test_size` ({1}) must be between (0,1)'.format(\n file_msg, param_dict['test_size'])\n raise ValueError(msg)\n ##\n ## Checking that `kf_splits` is larger than `2`\n if (param_dict['kf_splits'] < 2):\n msg = '{0} The value for `kf_splits` ({1}) must be LARGER than `2`'\n msg += 'Exiting...'\n msg = msg.format(param_dict['Prog_msg'], param_dict['kf_splits'])\n raise ValueError(msg)\n ##\n ## Checking that `n_predict` is not smaller than `1`.\n if (param_dict['n_predict'] < 1):\n msg = '{0} The value for `n_predict` ({1}) must be LARGER than `1`'\n msg += 'Exiting...'\n msg = msg.format(param_dict['Prog_msg'], param_dict['n_predict'])\n raise ValueError(msg)", "def setUp(self):\n\t\tprint(\"\\n-------------------------------------\\nIn Test_ValidParams:\", self._testMethodName)", "def no_params(self) -> bool:\n result = True\n # Fixing issue #92\n if self.properties.parameters:\n return False\n else:\n return True\n # for parameter in self.properties.parameters:\n # if parameter == \"effect\":\n # continue\n # else:\n # result = False\n # break\n # return result", "def sanity_check():\n print(\"Running sanity check...\")\n\n N = 20\n dimensions 
= [10, 5, 10]\n data = np.random.randn(N, dimensions[0]) # each row will be a datum\n labels = np.zeros((N, dimensions[2]))\n for i in range(N):\n labels[i,random.randint(0,dimensions[2]-1)] = 1\n \n params = np.random.randn((dimensions[0] + 1) * dimensions[1] + (\n dimensions[1] + 1) * dimensions[2], )\n\n gradcheck_naive(lambda params: forward_backward_prop(data, labels, params,\n dimensions), params)", "def _check_initialized(self):\n check_is_fitted(self, 'estimators_')", "def _is_parameters_ok(self):\n if self.maxresult is None:\n raise MissingParameterException(\"Arithmetics neuron needs maxresult parameter.\")\n\n return True", "def _preparation_workflow(self):\n self._validate_environment()\n self._validate_parameters()\n self._update_verbosity()", "def check(self, algorithm_data: AlgorithmData) -> Tuple[bool, str, bool]:\n\n return False, False, \"The check method of the training class has to be implemented\"", "def _check_before_run(self):\r\n if not osp.exists(self.dataset_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\r\n if not osp.exists(self.train_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))", "def test(self):\n self.training = False", "def check_parameters():\r\n for par in PARAM:\r\n if isinstance(par, ExperimentFrame):\r\n EXP.change_variable(**par())\r\n else:\r\n EXP.change_variable(**par)", "def validate(self, candidate, **kwargs) -> bool:\n return super().validate(candidate, **kwargs) and self._predictor.validate(candidate)", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.list_query_path):\n raise RuntimeError(\"'{}' is not available\".format(self.list_query_path))\n if not osp.exists(self.list_gallery_path):\n raise RuntimeError(\"'{}' is not available\".format(self.list_gallery_path))" ]
[ "0.766322", "0.7477992", "0.7417763", "0.7302848", "0.72800577", "0.72335196", "0.71856815", "0.7176553", "0.71609956", "0.71493304", "0.71282524", "0.71061707", "0.6990331", "0.69771767", "0.69568545", "0.68638104", "0.68232596", "0.68174267", "0.67853534", "0.6761448", "0.67518336", "0.67311805", "0.6703886", "0.6688447", "0.66882664", "0.6684622", "0.66828185", "0.6681152", "0.6666766", "0.6666242", "0.66494644", "0.6624774", "0.65957856", "0.6576762", "0.65508866", "0.65484554", "0.6544916", "0.65433466", "0.6539561", "0.6539561", "0.6539561", "0.65379304", "0.6528582", "0.65111446", "0.6509066", "0.65048665", "0.65012145", "0.6482931", "0.6480848", "0.6470273", "0.646322", "0.644093", "0.64361745", "0.6430774", "0.64131033", "0.6364952", "0.636296", "0.63592607", "0.6348701", "0.634787", "0.6330589", "0.631226", "0.630762", "0.63000005", "0.629509", "0.6284044", "0.62753487", "0.6241091", "0.62356293", "0.622004", "0.6219467", "0.6207404", "0.6207067", "0.619093", "0.618838", "0.6186215", "0.61811894", "0.61789924", "0.6161736", "0.61615527", "0.6160044", "0.6160044", "0.6157439", "0.6143068", "0.6139963", "0.6134923", "0.6130835", "0.61254805", "0.6114982", "0.6109374", "0.6103003", "0.6099776", "0.6099406", "0.6096292", "0.60958827", "0.6093867", "0.6092579", "0.6084627", "0.6078286", "0.6077579" ]
0.6948363
15
Returns the tangent stiffness function, computed symbolically at runtime.
def make_symbolic(symfile_path="fung_Dsym", resym=False):
    # Construct the quadratic form as a flat list of independent entries
    q = np.array([sp.symbols('q_{}{}'.format(i,j)) for (i,j) in lin.utri_indices(6)])
    # We also need its matrix form
    Q = np.empty((6,6), dtype=object)
    for (k,(i,j)) in zip(range(len(q)), lin.utri_indices(6)):
        Q[i,j] = q[k]
        Q[j,i] = Q[i,j]
    # Construct the Lagrangian strain (E) as a *vector* (e)
    f = np.array([sp.symbols('f_{i}'.format(i=i)) for i in range(9)])
    F = np.reshape(f, (3,3))
    J = sp.Matrix(F.tolist()).det()
    # TODO: Is this supposed to be J**(-4/3) or J**(-2/3)?
    E = 0.5*(J**sp.Rational(-2,3)*np.dot(F.T, F) - np.eye(3))
    e = lin.utri_flat(E)
    # Expand the quadratic form's action on e
    Qee = np.dot(e, np.dot(Q, e))
    # Attempt to load this from a pickle file
    try:
        Dsym = pickle.load(open(symfile_path+".pkl", 'rb'))
    # Otherwise, just recompute it
    except Exception:
        # Construct the tangent stiffness as a symbolic expression
        # Calculate first derivatives
        dQ = np.empty(9, dtype=object)
        for i in range(9):
            print("Symbolic dQ_{}".format(i))
            dQ[i] = sp.diff(Qee, f[i])
        # Calculate second derivatives
        Dsym = np.empty((9,9), dtype=object)
        for (i,j) in lin.utri_indices(9):
            print("Symbolic ddQ_{0}{1}".format(i,j))
            dQi = dQ[i]
            dQj = dQ[j]
            dQij = sp.diff(dQi, f[j])
            Dsym[i,j] = dQi*dQj + dQij
            # Optimize the derivative by substituting for J
            print(" Simplifying...")
            print(" J ", end="")
            sys.stdout.flush()
            Dsym[i,j] = Dsym[i,j].subs(J, sp.symbols('J'))
            # Further optimize by replacing products of f compoenents
            if resym == True:
                for (k,l) in lin.utri_indices(9):
                    print("f{}f{}".format(k,l), end=" ")
                    sys.stdout.flush()
                    pair_symbol = sp.symbols('ff_{0}{1}'.format(k,l))
                    Dsym[i,j] = Dsym[i,j].subs(f[k]*f[l], pair_symbol)
            # Since D will be symmetric, assign the symmetric components
            print("\n Symmetrizing...")
            Dsym[j,i] = Dsym[i,j]
            # This computation is pretty costly, so let's save it
            # frequently
            if resym == True:
                pickle.dump(Dsym, open(symfile_path+"_{}{}.pkl".format(i,j), 'wb'))
        pickle.dump(Dsym, open(symfile_path+".pkl", 'wb'))
    return Dsym
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def computeTangent(self):\n # return np.matmul(self.examples.T, self.gradAmbient)\n return self.gradAmbient + self.centroid * minkowskiDot(self.centroid, self.gradAmbient)", "def tangent(self, param, diff=0, xyz=False):\n return self.diff(param, diff=diff+1, xyz=xyz)", "def f(self,un,tn):\n return -self.a(tn)*un + self.b(tn)", "def tangent(x, K=10, C=0.1):\n s = np.zeros_like(x)\n m = (x >= 0)\n # 1) x >= 0\n e = np.exp(-x[m] * C)\n s[m] = K * (1 - e) / (1 + e)\n # 2) x < 0\n e = np.exp(x[~m] * C)\n s[~m] = K * (e - 1) / (e + 1)\n return s", "def calcT(self, theta, T_ss):\n return T_ss * np.cos(theta)**0.25", "def tan(self):\n\t\t# Ensure that no values in self.val are of the form (pi/2 + k*pi) \n\t\tvalues = map(lambda x: ((x / np.pi) - 0.5) % 1 == 0.0, self.val)\n\t\tif any(values):\n\t\t\traise ValueError(\"Tangent not valid at pi/2, -pi/2.\")\n\t\tval = np.tan(self.val)\n\t\tif len(self.der.shape):\n\t\t\tto_multiply = np.power(1 / np.cos(self.val), 2)\n\t\t\tto_multiply = np.expand_dims(to_multiply, 1) if len(self.der.shape) > len(to_multiply.shape) else to_multiply\n\t\t\tder = np.multiply(to_multiply, self.der)\n\t\telse:\n\t\t\tder = None\n\t\treturn Var(val, der)", "def edge_tangent(edge):\n tan = None\n for l in edge.link_loops:\n t = edge.calc_tangent(l)\n if not round(t.z):\n tan = t\n return tan", "def tangent(self, p):\n p = array(p, float)\n v = (p - self.o)\n v /= norm(v)\n b = self.o + ((cross(v, self.N) - v) / 3)*self.r\n mb = _mirror_(self.o, p, b) \n mbb = mb - b\n return mbb/norm(mbb)", "def tangent(self, uv):\n dU, dV = gp_Dir(), gp_Dir()\n res = GeomLProp_SLProps(self.surface(), uv[0], uv[1], 1, 1e-9)\n if res.IsTangentUDefined() and res.IsTangentVDefined():\n res.TangentU(dU), res.TangentV(dV)\n return (geom_utils.gp_to_numpy(dU)), (geom_utils.gp_to_numpy(dV))\n return None, None", "def ttd_l_func(self):\n i2 = self.inl[1].to_flow()\n o1 = self.outl[0].to_flow()\n return (self.ttd_l.val - T_mix_ph(o1, T0=self.outl[0].T.val_SI) +\n T_mix_ph(i2, T0=self.inl[1].T.val_SI))", "def calcT(self, theta, phi, T_ss):\n return T_ss * np.maximum(0.0, np.cos(theta) * np.cos(phi))**0.25", "def SLE_DL(t, y):\n DyFun = SLEfun(y,C)\n Dygrand = tf.gradients(y, t)[0]\n return Dygrand - DyFun", "def _dy(self, T):\n return self._h(np.diff(T)) * self._a / self._m / self._c * np.diff(T) * np.array([1, -1])", "def newton_decent_directions(function, func_derivative, func_hessian, xk, A, P, b, q, t):\r\n # calculate steepest decent direction\r\n newton_dir = -np.dot(np.linalg.inv(func_hessian(x=xk, A=A, P=P, b=b, q=q, t=t)), func_derivative(x=xk, A=A, P=P, b=b, q=q, t=t))\r\n\r\n return newton_dir", "def tanh(a):", "def activ_fn_derivative(z):\n return 1 - np.square(np.tanh(z))", "def diffuse_transmittance(TL = TL_default):\n return ((-21.657) + (41.752 * (TL)) + (0.51905 * (TL) * (TL)))", "def slope_of_tangent(self, x: float, graph: ParametricFunction, **kwargs) -> float:\n\n return np.tan(self.angle_of_tangent(x, graph, **kwargs))", "def dx(x, t, S):\n f = S @ x\n phi = f @ x\n return x * (f - phi)", "def tanh(self):\n return type(self)(self.parent(),\n self._simplify(self._express.tanh()))", "def dynstall_oye_dxdt_simple(fs, fs_alpha, tau):\n return 1/tau * (fs_alpha - fs)", "def compute_tangent(self):\n ce = CovEmbedding()\n ce.fit([self.cov_])\n self.tangent_ = ce.transform([self.cov_])[0]\n return self", "def CalcForceDistribution(self):\n\t\t\n\t\tself.F = self.s * (self.Tether - self.X)\n\t\t\n\t\treturn self.F", "def grad_tanh(self):\r\n return 1 - np.square(self.tanh(self.x))", 
"def ti_func(self):\n return self.ti.val - self.calc_ti()", "def get_force(self):\n displ = self.get_displ()\n equil = displ / np.linalg.norm(displ) * self.L0\n return self.k * (displ - equil)", "def CalcLinearForce(self):\n\t\t\n\t\tself.F = -self.s * self.X\n\t\t\n\t\treturn self.F", "def grad_tanh(self):\n grad = 1 - self.tanh(self.x) ** 2\n return grad", "def tan(self):\n return type(self)(self.parent(),\n self._simplify(self._express.tan()))", "def to_tangent(self, vector, base_point):\n tangent_vec = self._iterate_over_factors(\n \"to_tangent\", {\"base_point\": base_point, \"vector\": vector}\n )\n return tangent_vec", "def deriv_screw(self, t: float, endBehavior: str = 'halt') -> Vector:\n dT = self.deriv(t,endBehavior)\n return so3.deskew(dT[0])+dT[1]", "def deriv_screw(self, t:float, endBehavior: str = 'halt') -> Tuple[Vector3,Vector3]:\n dT = self.deriv(t,endBehavior)\n return so3.deskew(dT[0])+dT[1]", "def ts_bt_func(thermal_rad, k1, k2):\n ts_bt = np.copy(thermal_rad).astype(np.float64)\n ts_bt[ts_bt <= 0] = np.nan\n np.reciprocal(ts_bt, out=ts_bt)\n ts_bt *= k1\n ts_bt += 1.0\n np.log(ts_bt, out=ts_bt)\n np.reciprocal(ts_bt, out=ts_bt)\n ts_bt *= k2\n return ts_bt.astype(np.float32)", "def diff_func(sat):\n state = sat.getstate()\n dstate = np.zeros(7)\n dstate[-1] = 1.0\n dstate[0] = state[1]\n dstate[2] = state[3]/(state[0])\n dstate[4] = state[5]/(state[0]*np.sin(state[2]))\n acc = tot_acc(sat)\n dstate[1], dstate[3], dstate[5] = sat.getvdot(acc[0], acc[1], acc[2])\n return dstate", "def tand(A):\n Arad = np.deg2rad(A)\n x = np.tan(Arad) \n return x", "def tanh(x):\r\n # see decorator for function body\r", "def global_stiffness(self, di=(0, 0, 0), dj=(0, 0, 0)):\n di, dj = np.asarray(di), np.asarray(dj)\n t = self.transformation_matrix(di, dj)\n k = self.local_stiffness(di, dj)\n return t.T.dot(k).dot(t)", "def get_symbolic_model(self):\n return self.sym_func", "def get_symbolic_model(self):\n return self.sym_func", "def tanh(x):\n return 0.0", "def grad_tanh(self):\n return (1-np.tanh(self.x)*np.tanh(self.x))\n raise NotImplementedError(\"tanh gradient not implemented\")", "def get_tangent(self, reference_point):\r\n closest_distance = inf\r\n current_piece = None\r\n # Iterate over all track's pieces to find the piece which is closest to the reference point.\r\n for piece in self.pieces:\r\n closest = piece.get_closest_to_point(reference_point)\r\n distance = closest.distance(reference_point)\r\n if distance < closest_distance:\r\n closest_distance = distance\r\n current_piece = piece\r\n # Returns the tangent of the closest point to the reference point in this piece\r\n return current_piece.get_tangent(reference_point)", "def tan(self, a):\n return math.tan(a)", "def calculate_gradient(y, tx, w): \n return tx.T@(sigmoid(tx@w)-y)", "def thermal_rad_func(ts_bt, k1, k2):\n thermal_rad = np.copy(ts_bt).astype(np.float64)\n np.reciprocal(thermal_rad, out=thermal_rad)\n thermal_rad *= k2\n np.exp(thermal_rad, out=thermal_rad)\n thermal_rad -= 1.0\n np.reciprocal(thermal_rad, out=thermal_rad)\n thermal_rad *= k1\n return thermal_rad.astype(np.float32)", "def getNormalizedTangent(pt = \"\"):\n\n if cmds.objectType(pt) != \"nurbsCurve\":\n return\n\n crv = pt.partition(\".\")[0]\n print(pt, crv)\n cvs = cmds.ls(\"{0}.cv[*]\".format(crv), fl=True)\n denom = len(cvs)\n num = float(pt.partition(\"[\")[2].rpartition(\"]\")[0])\n pr = num/denom\n tan = cmds.pointOnCurve(crv, pr=pr, nt=True)\n\n return(tan)", "def f(x):\n return np.tan(x) - np.sin(x) - (m*g)/(2*k*L)", "def 
deviatoric_stress_tensor(self):\n s = \"::: forming the deviatoric part of the Cauchy stress tensor :::\"\n print_text(s, self.color)\n epi = self.strain_rate_tensor()\n tau = 2*self.eta*epi\n return tau", "def _tand(v):\n return math.tan(math.radians(v))", "def ttd_u_func(self):\n T_i1 = T_mix_ph(self.inl[0].to_flow(), T0=self.inl[0].T.val_SI)\n T_o2 = T_mix_ph(self.outl[1].to_flow(), T0=self.outl[1].T.val_SI)\n return self.ttd_u.val - T_i1 + T_o2", "def strain_rate_tensor(self):\n U = self.U3\n return 0.5 * (grad(U) + grad(U).T)", "def tanh(input, inplace=False):\n return FunctionLib.apply(\n 'Tanh', input.device, [input],\n outputs=[input if inplace else None])", "def compute_stoch_gradient(y, tx, w):\n N = y.shape[0]\n e = y - np.dot(tx, w)\n \n return -1/N*np.dot(tx.T, e)", "def _get_T(self, polarization, net_t_amp, n_i, n_f, theta_i=0., theta_f=0.):\n if (polarization=='s'):\n return np.abs(net_t_amp**2) * (n_f/n_i)\n elif (polarization=='p'):\n return np.abs(net_t_amp**2) * (n_f/n_i)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")", "def jacobian(self, dt):\n if dt not in self._F_cache:\n d = self._dimension\n with torch.no_grad():\n F = eye_like(self.sa2, d)\n F[: d // 2, d // 2 :] = dt * eye_like(self.sa2, d // 2)\n self._F_cache[dt] = F\n\n return self._F_cache[dt]", "def derived_tanh(x):\n return 1 - tanh(x)", "def derived_tanh(x):\n return 1 - tanh(x)", "def tan(data):\n return _make.tan(data)", "def dL_dtheta(self):\r\n dL_dtheta = self.kern.dK_dtheta(self.dL_dKmm, self.Z)\r\n if self.has_uncertain_inputs:\r\n dL_dtheta += self.kern.dpsi0_dtheta(self.dL_dpsi0, self.Z, self.X, self.X_variance)\r\n dL_dtheta += self.kern.dpsi1_dtheta(self.dL_dpsi1, self.Z, self.X, self.X_variance)\r\n dL_dtheta += self.kern.dpsi2_dtheta(self.dL_dpsi2, self.Z, self.X, self.X_variance)\r\n else:\r\n dL_dtheta += self.kern.dK_dtheta(self.dL_dpsi1, self.X, self.Z)\r\n dL_dtheta += self.kern.dKdiag_dtheta(self.dL_dpsi0, self.X)\r\n\r\n return dL_dtheta", "def getAbsGrad(Tx,Ty):\n \n # These are place holders, you will overwrite them in your code.\n dummy = torch.zeros(3,4)\n absGradT = dummy\n\n # TODO: your code here to compute absGradT = sqrt(Tx^2 + Ty^2)\n \n return absGradT", "def calc_ti(self):\n m = 0\n for i in self.inl:\n m += i.m.val_SI * i.fluid.val[self.fuel_alias.val]\n\n for o in self.outl:\n m -= o.m.val_SI * o.fluid.val[self.fuel_alias.val]\n\n return m * self.lhv", "def grad_sigmoid(self):\n grad = self.sigmoid(self.x) * (1 - self.sigmoid(self.x))\n return grad", "def diffuse_coefficient(self):\n return self._diffuse_coefficient", "def rhs(x, t):\n\n return - np.sin(np.pi * x) * (np.sin(t) - 1 * np.pi ** 2 * np.cos(t))", "def get_thrust(self, t):\n\n if t >= 0.0:\n prev_t = 0.0\n prev_thrust = 0.0\n for node in self.thrust:\n if round(t, 3) <= round(node.t, 3):\n factor = (t - prev_t) / (node.t - prev_t)\n return prev_thrust + factor * (node.thrust - prev_thrust)\n\n prev_t, prev_thrust = node\n\n return 0.0", "def tan(x):\n return 0.0", "def ttd_u_func(self):\n i1 = self.inl[0].to_flow()\n o2 = self.outl[1].to_flow()\n T_o2 = T_mix_ph(o2, T0=self.outl[1].T.val_SI)\n return self.ttd_u.val - T_bp_p(i1) + T_o2", "def tanh_backward(dA, internal_params):\n Z = internal_params\n Zt=tanh(Z)\n dzp=np.power(Zt,2)\n print(dzp.shape)\n dZ=np.multiply(dzp,dA)\n return dZ\n # raise NotImplementedError", "def get_2nd_derivative(self, output_name, wrt):\n \n return self.hessian[wrt[0]][wrt[1]][output_name]", "def __GeometricStiffnessIntegrand__(self, SpatialGradient, 
CauchyStressTensor, detJ):\n return GetGeomStiffness(np.ascontiguousarray(SpatialGradient),CauchyStressTensor, detJ, self.nvar)", "def tanh_backward(dA, Z):\n\n s = 1 - tanh(Z)**2\n dZ = dA * s\n\n assert (dZ.shape == Z.shape)\n\n return dZ", "def d_tanh(x):\n return 1. - np.power(np.tanh(x), 2)", "def Tc (x,infin, a, nu):\r\n return infin + a* (x ** (-1/nu))", "def returnTangentVectorAtXi(self, xi_value):\n xVec, yVec, zVec = self.XYZCoordinate\n\n sY = self.dudx_xyPlane.evalf(subs={'xi': xi_value}) # scalar y\n sZ = self.dudx_xzPlane.evalf(subs={'xi': xi_value}) # scalar z\n \n tanVec = xVec + sY*yVec + sZ*zVec\n \n return tanVec.astype(np.float64)", "def ft(t):\r\n ft = t ** (1.0 / 3.0) if t > 0.008856 else 7.787 * t + 4 / 29\r\n return ft", "def sokal_sneath_coeff(self):\n a, c, _, b = self.to_ccw()\n return _div(a, a + 2 * (b + c))", "def compute_td_spectral_function(self):\n nomegase = self.nomegase\n nkpt = self.nkpt\n nband = self.nband\n ntemp = self.ntemp\n\n self.spectral_function_T = np.zeros((nomegase, ntemp, nkpt, nband),\n dtype=float)\n\n omega = np.einsum('ijt,l->ijlt',\n np.ones((nkpt, nband, ntemp)), self.omegase)\n\n self.spectral_function_T = (\n (1 / np.pi) * np.abs(self.self_energy_T.imag) /\n ((omega - self.self_energy_T.real) ** 2\n + self.self_energy_T.imag ** 2)\n )", "def tanh(x):\n return (1 - e ** (-2*x))/ (1 + e ** (-2*x))", "def dynamics(state,t):\n global M,m\n f = control_upright(state)\n # f = 0\n dydx = np.zeros_like(state)\n x,x_dot,th,th_dot = state #unpacking the state\n dydx[0] = x_dot\n dydx[2] = th_dot\n\n den1 = M + (m*sin(th)*sin(th))\n dydx[1] = (f + (m*g*sin(th)*cos(th)) + m*L*th_dot*th_dot*sin(th) + (b/L)*(th_dot*cos(th)))/den1\n den2 = L*den1\n dydx[3] = (((M+m)*g*sin(th) + f*cos(th) + m*L*th_dot*th_dot*sin(th)*cos(th))/den2) + (b/(m*L*L))*th_dot\n dydx[3] = -dydx[3]\n\n return dydx", "def dynstall_oye_dxdt(t,fs,u,p):\n alpha = u['alpha'](t)\n f_st = p['F_st'](alpha)\n return 1/p['tau'] * (f_st - fs)", "def L_tf(self) -> tf.Tensor:\n return tf.diag(self.out_degrees_tf_vector) - self.A_tf", "def get_thrust_and_moment(self):\n\n f1 = self.k_f * self.omega_1 ** 2\n f2 = self.k_f * self.omega_2 ** 2\n \n # c is often used to indicate \"collective\" thrust\n c = f1 + f2\n \n M_x = (f1 - f2) * self.l\n return c, M_x", "def tanh(self):\t\t\t\t\n\t\tval = np.tanh(self.val)\n\t\tif len(self.der.shape):\n\t\t\tto_multiply = 1 / np.power(np.cosh(self.val), 2)\n\t\t\tto_multiply = np.expand_dims(to_multiply, 1) if len(self.der.shape) > len(to_multiply.shape) else to_multiply\n\t\t\tder = to_multiply * self.der\n\t\telse:\n\t\t\tder = None\n\t\treturn Var(val, der)", "def d_tanh(x):\n\n return 1 - x.tanh().pow(2)", "def ThetaFunc(self, x):\n return 0.5 * (np.sign(x) + 1)", "def tanh(x):\n raise NotImplementedError", "def effective_stress(self):\n tau = self.deviatoric_stress_tensor()\n tu_xx = tau[0,0]\n tu_yy = tau[1,1]\n tu_zz = tau[2,2]\n tu_xy = tau[0,1]\n tu_xz = tau[0,2]\n tu_yz = tau[1,2]\n \n # Second invariant of the strain rate tensor squared\n taudot = 0.5 * (+ tu_xx**2 + tu_yy**2 + tu_zz**2) \\\n + tu_xy**2 + tu_xz**2 + tu_yz**2\n return taudot", "def tanh(x):\n return (1- power(e,(-2*x))) / (1 + power(e,(-2*x)))", "def to_tangent(self, vector, base_point):\n return gs.copy(vector)", "def eval_accel(self,t,endBehavior='halt') -> Vector:\n res = Trajectory.deriv_state(self,t,endBehavior)\n return res[len(res)//2:]", "def _tof_equation(x, y, T0, ll, M):\n if M == 0 and np.sqrt(0.6) < x < np.sqrt(1.4):\n eta = y - ll * x\n S_1 = (1 - ll - x * 
eta) * .5\n Q = 4 / 3 * hyp2f1b(S_1)\n T_ = (eta ** 3 * Q + 4 * ll * eta) * .5\n else:\n psi = _compute_psi(x, y, ll)\n T_ = np.divide(np.divide(psi + M * pi,\n np.sqrt(np.abs(1 - x ** 2))) - x + ll * y,\n (1 - x ** 2))\n\n return T_ - T0", "def casadi_ode(self, t, x, u, w):\n v = u[0]\n thetadot = u[1]\n theta = x[2]\n f = cs.vertcat(v * cs.cos(theta), v * cs.sin(theta), thetadot)\n if self.use_nonlinear_noise_model:\n w_vec = cs.vertcat(self.scale[0]*(cs.cos(theta) * w[0] - cs.sin(theta) * w[1]),\n self.scale[1]*(cs.sin(theta) * w[0] + cs.cos(theta) * w[1]),\n self.scale[2]*(v * w[2]))\n else:\n w_vec = w\n return f + w_vec", "def linear_tween(t, b, c, d):\n return c * t / d + b", "def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)", "def get_basis(self, t):\n return np.vstack([self._Phi[:, t], self._PhiD[:, t]]), np.vstack([self._PhiD[:, t], self._PhiDD[:, t]])", "def tangentConstraint(*args, aimVector: Union[List[float, float, float], bool]=None, layer:\n AnyStr=\"\", name: Union[AnyStr, bool]=\"\", remove: bool=True, targetList:\n bool=True, upVector: Union[List[float, float, float], bool]=None, weight:\n Union[float, bool]=0.0, weightAliasList: bool=True, worldUpObject:\n Union[name, bool]=None, worldUpType: Union[AnyStr, bool]=\"\",\n worldUpVector: Union[List[float, float, float], bool]=None, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass", "def gradient(cls, x):\n return 1 - TanH.apply(x) ** 2", "def tan(x):\r\n # see decorator for function body\r", "def get_input_grad(self, Y, T):\n return (Y - T) / Y.shape[0]", "def rhs(y,t):\n return math.cos(t)" ]
[ "0.66807896", "0.59414274", "0.5845319", "0.5835779", "0.5743613", "0.57013124", "0.56820303", "0.5665989", "0.5611205", "0.5600077", "0.55448455", "0.5506846", "0.5505399", "0.5466508", "0.54543483", "0.5423472", "0.54231364", "0.5411315", "0.5397401", "0.5387374", "0.53846455", "0.5372674", "0.5372582", "0.5370558", "0.5352683", "0.53487676", "0.53480005", "0.53453845", "0.53411925", "0.5339904", "0.5324839", "0.53171325", "0.5312734", "0.527867", "0.5263587", "0.5253036", "0.52519655", "0.523305", "0.523305", "0.5227857", "0.52273005", "0.5200995", "0.51984626", "0.519465", "0.5190722", "0.5183905", "0.51747215", "0.5168195", "0.5145803", "0.5138846", "0.5137644", "0.51371574", "0.5136219", "0.5132122", "0.51201", "0.5119211", "0.5119211", "0.5112039", "0.51107395", "0.5109342", "0.5105647", "0.51030743", "0.50999904", "0.5087301", "0.50845456", "0.5083747", "0.5080221", "0.50685096", "0.5067081", "0.5048441", "0.5047911", "0.50465417", "0.5045582", "0.50440866", "0.5043699", "0.50425386", "0.50394464", "0.5039305", "0.5039002", "0.50292134", "0.50249976", "0.50221664", "0.501634", "0.50154763", "0.5013962", "0.5013773", "0.5007848", "0.5006213", "0.5000051", "0.49998817", "0.4999347", "0.49862593", "0.49857828", "0.49850944", "0.49844515", "0.49789312", "0.49782485", "0.49738923", "0.4972846", "0.49726805" ]
0.51767015
46
Base of sett module
def base(request):
    return render(request, 'sett_base.html', {})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set():", "def set():\n pass", "def Set(self) -> None:", "def __init__(self):\n self.set = set()", "def use(self):", "def set(x):\n pass", "def __init__(self, *args):\n _snap.TIntSet_swiginit(self, _snap.new_TIntSet(*args))", "def __init__(self, name: unicode, set: ghidra.util.graph.KeyIndexableSet):\n ...", "def base():", "def _setup(self):", "def _setup(self):", "def __init__(self):\n self.EntireSet = []", "def __init__(self,s={}) -> None:\n\n self.set=list()", "def make_io_set(self):\n pass", "def regular(self):", "def __init__(self):\n self.s = set()", "def getSets():", "def setup( self ):", "def setup(self):", "def setup(self):", "def setup(self):", "def setup(self):", "def __init__(self, set):\n Rule.__init__(self)\n self.__set = set", "def __init__(self, set):\n Rule.__init__(self)\n self.__set = set", "def setup(self, i, t):\n \n self.t = t\n return i", "def SetTrSet(self,value):\n self.ds = value", "def __init__():", "def __int__(self):\n pass", "def __init__(self):\n self.randomSet = dict()", "def _base(self):\n pass", "def __init__(self):\n self.currSyms = {}\n Traversable.__init__(self)", "def __init__(self, sets: List[ColdStartUserSet]):\n self.sets = sets", "def common(self):", "def set(self, **kwargs):\n raise NotImplementedError", "def ustvari(self):\n raise NotImplementedError", "def init(self):", "def init(self):", "def __init__(self):\n self.l=set()", "def __init__(self, base):\r\n\r\n self.base = base", "def __init__(self):\n self.ds = set()\n self.keys = []", "def support(self):", "def set(self, U):\n pass", "def set(self, U):\n pass", "def setup(self) -> None:", "def __init__( settings={} ):", "def sth():", "def __init__(self):\n self._data = set()", "def __init__(self):\n self.container = set()", "def setup(self):\n\t\tpass", "def __call__(self):\n\t\treturn", "def _set_attributes(self):", "def __setitem__(self, t: Tuple[int, ...], o: 'Tree') -> None:\n ...", "def __init__(self):\n self.value_set = set()\n self.values = []", "def _init(self):", "def __init__(self, set_ptr=None):\n\n if set_ptr is None:\n self.set = ipset.ipset_new()\n else:\n self.set = set_ptr", "def MINET(self):", "def __call__(self) -> None:", "def __init__(self):\n self.recipeset = {}\n self.hardcoded()", "def __init__(self, base, **kwargs):\n self.base = base", "def __init__(self, base):\n\n self.base = base", "def setup(self):\n ...", "def initialise(self):", "def setup_class(klass):", "def setup_class(klass):", "def set_T(self, T):\n self.T = T", "def T(self, T) :\n\t\ttry :\n\t\t\tself._T = T\n\t\texcept Exception as e:\n\t\t\traise e", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self, t):\n # self.k = k # TODO: refactor and add here as param\n # self.l = l # TODO: refactor and add here as param\n self._t = t\n self._hash_tables = []", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def 
__setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def setToolTo(*args, **kwargs)->None:\n pass" ]
[ "0.7038524", "0.70106906", "0.64644986", "0.62909436", "0.622287", "0.6137132", "0.60996884", "0.59956205", "0.594199", "0.593214", "0.593214", "0.5907629", "0.59012085", "0.5887358", "0.58760923", "0.5856856", "0.58564854", "0.5838827", "0.573239", "0.573239", "0.573239", "0.573239", "0.57154113", "0.57154113", "0.5713323", "0.56771064", "0.5671844", "0.5662046", "0.56457", "0.5644961", "0.5581008", "0.5576573", "0.554014", "0.5538921", "0.5519707", "0.5501204", "0.5501204", "0.5499716", "0.5493004", "0.5488927", "0.5482923", "0.5480788", "0.5480788", "0.5474288", "0.546925", "0.54567975", "0.54345965", "0.54325765", "0.54201853", "0.54103744", "0.5400106", "0.53910065", "0.53867507", "0.5384426", "0.53765255", "0.5363788", "0.53580624", "0.5353544", "0.53386617", "0.5337176", "0.53368247", "0.53364664", "0.53303814", "0.53303814", "0.5326646", "0.532485", "0.53190553", "0.53190553", "0.53190553", "0.53190553", "0.53190553", "0.53190553", "0.53190553", "0.53190553", "0.53190553", "0.53190553", "0.53190553", "0.53190553", "0.53190553", "0.53190553", "0.53188425", "0.5315295", "0.5315295", "0.5315295", "0.5315295", "0.5315295", "0.5315295", "0.5315295", "0.5315295", "0.5315295", "0.5315295", "0.5315295", "0.5315295", "0.5315295", "0.5315295", "0.5315295", "0.5315295", "0.5315295", "0.5315295", "0.5315295", "0.531335" ]
0.0
-1
UiView of sett module
def ui_view(request):
    return render(request, 'sett_ui_view.html', {})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ui(self):\n return ui", "def init_ui(self):\n raise NotImplementedError", "def init_ui(self):\n raise NotImplementedError", "def prepare_UI(self):", "def view(self):", "def show(self):", "def updateSettingsUI(self):\n\n pass", "def __init__(self):\n self.view = GuiView(self)\n return", "def init_ui(self):\n raise NotImplementedError(\"This is an abstract method.\")", "def getWidget(self):", "def init_ui(self):\n # Create GUI elements, set them in dict structure\n labelwidth = 150\n\n # Add parameter line edit for Factor Tm to Tp\n\n # Add line edit with browsebutton for swan result folder\n self.input_elements['hares folder'] = widgets.ExtendedLineEdit(\n label='HARES uitvoerbestanden folder:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_hares_folder)\n )\n\n\n self.setLayout(QtWidgets.QVBoxLayout())\n self.layout().setSpacing(10)\n\n for _, item in self.input_elements.items():\n self.layout().addWidget(item)\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.layout().addWidget(line)\n\n # OK and Cancel buttons\n self.generateButton = QtWidgets.QPushButton('Start lezen uitvoerbestanden')\n self.generateButton.setDefault(True)\n self.generateButton.clicked.connect(self.generate)\n\n self.cancelButton = QtWidgets.QPushButton('Annuleren')\n self.cancelButton.setAutoDefault(False)\n self.cancelButton.clicked.connect(self.cancel)\n\n button_box = QtWidgets.QDialogButtonBox(QtCore.Qt.Horizontal, self)\n button_box.addButton(self.generateButton, QtWidgets.QDialogButtonBox.ActionRole)\n button_box.addButton(self.cancelButton, QtWidgets.QDialogButtonBox.RejectRole)\n button_box.accepted.connect(QtWidgets.QDialog.accept)\n\n self.layout().addWidget(button_box)", "def setup_additional_ui(self):\n\n #set title\n self.setWindowTitle(self.title)\n\n #set question\n self.lbl_question.setText(self.question)\n\n #set_remember_choice\n self.set_remember_choice(self.chkbx_remember_choice.isChecked())", "def ui(self):\n return self._ui", "def view(self):\n raise NotImplementedError", "def additional_ui(self):\n return _UI_DEF", "def __init__(self, parent=None):\n super(Representative, self).__init__(parent)\n self.setupUi(self)", "def inicialUI(self):\r\n\r\n self.setGeometry(500, 500, 500, 500)\r\n self.setWindownTitle(\"Pesquisa\")\r\n self.displayWidgets()\r\n\r\n self.show()", "def gui(self):\n return gui", "def show(self) -> None:", "def ui(self, ui):\n\n self._ui = ui", "def set_ui(self):\n\n self.setLayout(self.horizon_layout)\n self.setWindowTitle(\"数据采集\")\n self.setWindowIcon(self.Icon)\n self.setWindowState(Qt.WindowMaximized)\n # self.resize(self._size_of_x, self._size_of_y)\n\n # //-set left\n self.horizon_left_layout1.addWidget(self.ECG)\n self.horizon_left_layout1.addWidget(self.ECGWin)\n self.horizon_left_layout2.addWidget(self.Respiration)\n self.horizon_left_layout2.addWidget(self.RespirationWin)\n self.horizon_left_layout3.addWidget(self.PulseWave)\n self.horizon_left_layout3.addWidget(self.PulseWaveWin)\n # self.horizon_left_layout4.addWidget(self.SpO2)\n # self.horizon_left_layout4.addWidget(self.SpO2Win)\n\n # self.vertical_left_layout.addStretch(1)\n self.vertical_left_layout.addLayout(self.horizon_left_layout1)\n # self.vertical_left_layout.addStretch(1)\n self.vertical_left_layout.addLayout(self.horizon_left_layout2)\n # self.vertical_left_layout.addStretch(1)\n self.vertical_left_layout.addLayout(self.horizon_left_layout3)\n # 
self.vertical_left_layout.addStretch(1)\n # self.vertical_left_layout.addLayout(self.horizon_left_layout4)\n # self.vertical_left_layout.addStretch(1)\n\n # //-set right\n # self.vertical_right_layout.addStretch(1)\n self.vertical_right_layout.addWidget(self.save)\n self.vertical_right_layout.addWidget(self.clear)\n self.vertical_right_layout.addWidget(self.receive)\n self.vertical_right_layout.addStretch(1)\n self.vertical_right_layout.addWidget(self.exit)\n # self.vertical_right_layout.addStretch(1)\n\n # //-set layout\n # self.horizon_layout.addStretch(0)\n self.horizon_layout.addLayout(self.vertical_left_layout)\n # self.horizon_layout.addStretch(0)\n # self.horizon_layout.addWidget(self.dataWin)\n self.horizon_layout.addLayout(self.vertical_right_layout)", "def _ui_module(self, name, module):\n raise NotImplementedError()", "def init_ui(self):\n self.parent.title(\"Roku Player Controller\")\n self.style.theme_use(\"default\")", "def __init__(self):\r\n super().__init__()\r\n self.init_ui()", "def show(self):\n pass", "def initializeUI(self):\n self.setStyleSheet(abstyle)\n self.setGeometry(140, 100, 860, 484)\n self.setWindowTitle('Emotions Data View')\n self.setupModelView()", "def setUp(self):\n self.ui = UI()", "def show(self):\n\n pass", "def toControls(self,widget):", "def setup(self):\n self.ui.setup_window()", "def on_action_set_view(self, content):\n self._view = content['view']\n self.refresh_traits_widget()", "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def init_ui(self):\n # Create GUI elements, set them in dict structure\n labelwidth = 150\n\n # Add parameter line edit for Factor Tm to Tp\n self.input_elements['factor Tm Tp'] = widgets.ParameterInputLine(\n label='Factor Tm naar Tp:',\n labelwidth=labelwidth,\n unitlabel='(NVT: Tp aanwezig)' if 'Tp' in self.hydraulic_loads.columns else '',\n validator=QtGui.QDoubleValidator(0.01, 99.99, 20),\n )\n\n if 'Tp' in self.hydraulic_loads.columns or self.parent_tab.step != 'I1':\n self.input_elements['factor Tm Tp'].set_enabled(False)\n\n # Add line edit with browsebutton for Master template\n self.input_elements['mastertemplate'] = widgets.ExtendedLineEdit(\n label='Master template bestand:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_master_template)\n )\n\n # Add line edit with browsebutton for depth file\n self.input_elements['depthfile'] = widgets.ExtendedLineEdit(\n label='Bathymetry bestand:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_bathymetry_file)\n )\n\n # Add line edit with browsebutton for swan result folder\n self.input_elements['swanfolder'] = widgets.ExtendedLineEdit(\n label='SWAN uitvoer folder:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_swan_folder)\n )\n\n\n self.setLayout(QtWidgets.QVBoxLayout())\n self.layout().setSpacing(10)\n\n for _, item in self.input_elements.items():\n self.layout().addWidget(item)\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.layout().addWidget(line)\n\n # OK and Cancel buttons\n self.generateButton = QtWidgets.QPushButton('Genereer invoer')\n self.generateButton.setDefault(True)\n self.generateButton.clicked.connect(self.generate)\n\n self.cancelButton = QtWidgets.QPushButton('Annuleren')\n self.cancelButton.setAutoDefault(False)\n self.cancelButton.clicked.connect(self.cancel)\n\n 
button_box = QtWidgets.QDialogButtonBox(QtCore.Qt.Horizontal, self)\n button_box.addButton(self.generateButton, QtWidgets.QDialogButtonBox.ActionRole)\n button_box.addButton(self.cancelButton, QtWidgets.QDialogButtonBox.RejectRole)\n button_box.accepted.connect(QtWidgets.QDialog.accept)\n\n self.layout().addWidget(button_box)", "def show():\n from siding.addons import ui\n ui.show()", "def admin(self, view):\n view.admin = True\n return view", "def set_view(self):\n self.scene.mlab.view(azimuth=90.0, elevation=-90.0)", "def __init__(self, ui: UI):\n super().__init__(ui)", "def widget(self, request, group):", "def _initializeUi(self):\r\n if self._mode == 'imperial':\r\n self.imperial_button.setChecked(True)\r\n self.imperial_button.clicked.emit()\r\n else:\r\n self.metric_button.setChecked(True)\r\n self.metric_button.clicked.emit()", "def _init_ui(self):\n self.setWindowTitle(\"HB Havens: resultaten\")\n self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)\n\n self.setLayout(QtWidgets.QVBoxLayout())\n\n # Create figure\n self.figure = Figure(figsize=(4,4))\n self.ax = self.figure.add_subplot()\n\n self.ax.grid()\n self.ax.spines['right'].set_visible(False)\n self.ax.spines['top'].set_visible(False)\n self.ax.tick_params(axis='y', color='0.75')\n self.ax.tick_params(axis='x', color='0.75')\n self.ax.set_aspect(1)\n\n # Add canvas\n self.canvas = FigureCanvasQTAgg(self.figure)\n\n # this is the Navigation widget\n # it takes the Canvas widget and a parent\n self.layout().addWidget(self.canvas)\n\n # Add location selection\n hbox = QtWidgets.QHBoxLayout()\n label = QtWidgets.QLabel('Locatie:')\n label.setFixedWidth(80)\n hbox.addWidget(label)\n self.location_combobox = QtWidgets.QComboBox()\n self.location_combobox.addItems(self.result_locations)\n self.location_combobox.setCurrentIndex(self.locid)\n self.location_combobox.currentIndexChanged.connect(self._set_location)\n hbox.addWidget(self.location_combobox)\n self.layout().addLayout(hbox)\n\n # Add parameter selection\n hbox = QtWidgets.QHBoxLayout()\n label = QtWidgets.QLabel('Parameter:')\n label.setFixedWidth(80)\n hbox.addWidget(label)\n self.parameter_combobox = QtWidgets.QComboBox()\n self.input_parameters = self.modelunctab.mainmodel.hydraulic_loads.result_columns[:]\n self.parameter_combobox.addItems(self.input_parameters)\n self.parameter_combobox.currentIndexChanged.connect(self._set_parameter)\n self.parameter_combobox.setCurrentIndex(0)\n self._set_parameter()\n self.figure.tight_layout()\n hbox.addWidget(self.parameter_combobox)\n self.layout().addLayout(hbox)\n\n # Line\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n\n self.layout().addWidget(line)\n\n # Add ok/close\n self.closebutton = QtWidgets.QPushButton('Sluiten')\n self.closebutton.clicked.connect(self.close)\n self.layout().addWidget(self.closebutton, 0, QtCore.Qt.AlignRight)\n\n self.layout().setSizeConstraint(QtWidgets.QLayout.SetFixedSize)", "def __init__(self):\n super(QTUIProject, self).__init__()\n self.setupUi(self)\n self.assignWidgets()\n self.show()\n self.SlotsJsonName = \"Slots Assets Folder\"\n self.BingoJsonName = \"Bingo Assets Folder\"", "def initializeUI(self):\n self.setGeometry(100, 100, 450, 300)\n self.setWindowTitle('Model and View Example')\n\n self.setupModelView()\n\n self.show()", "def initUI(self):\n # Setting the main layout as Vertical.\n self.mainLayout = QHBoxLayout()\n\n # Create title.\n self.title = QLabel(self.__name + \" : \")\n\n 
# Add description as tooltip.\n self.title.setToolTip(self.__description)\n\n # Add title to main layout.\n self.mainLayout.addWidget(self.title)\n\n # Create ComboBox.\n self.dropDown = QComboBox()\n\n # Add datas to drop down.\n self.dropDown.addItems(self.__datas)\n\n # Set default index to dropdown.\n self.dropDown.setCurrentIndex(self.__currentValue)\n\n # Connect dropdown with update method.\n self.dropDown.currentIndexChanged.connect(self.changeCurrentValue)\n\n # Add ComboBox to main layout.\n self.mainLayout.addWidget(self.dropDown)\n\n # Add the main layout to the window.\n self.setLayout(self.mainLayout)", "def create_widgets(self):", "def iniciaUI(self):\n\n self.setGeometry(100,100, 300, 200)\n self.setWindowTitle(\"Formulario\")\n self.displayWidgets()\n\n self.show()", "def show(self):\n\t\traise NotImplementedError()", "def init_widget(self):", "def initUI(self):\n\n self.wid = RosGenWidget()\n self.setCentralWidget(self.wid)\n menubar = self.menuBar()\n fileMenu = menubar.addMenu('&Файл')\n editMenu = menubar.addMenu('&Редактирование')\n self.create_menu_par('Менеджер подписчиков и издателей', self.wid.show_manager, fileMenu, 'Ctrl+M')\n self.create_menu_par('Очистить', self.wid.clear_all_lines, editMenu, 'Ctrl+D')\n self.create_menu_par('Загрузить данные из...', self.wid.open_fileDialog, fileMenu, 'Ctrl+F')\n self.create_menu_par('Сохранить как...', self.wid.save_fileDialog, fileMenu, 'Ctrl+S')\n self.create_menu_par('Выход', self.exit_app, fileMenu, 'Esc')\n self.statusbar = self.statusBar()\n self.statusbar.showMessage('Ожидание данных')\n self.wid.msg2Statusbar[str].connect(self.statusbar.showMessage)\n self.setGeometry(600, 200, 700, 400)\n self.setWindowTitle('Генератор шаблонов ROS-приложения')\n self.show()", "def setup_ui(self):\n\t\t\n\t\t# CREATION DU LAYOUT\n\t\tself.layout = QtWidgets.QHBoxLayout(self) #le layout prend la fenetre principal en argument donc notre self\n\t\t\n\t\t# CREATION DES WIDGETS\n\t\tself.cbb_devisesFrom = QtWidgets.QComboBox() #combobox (liste deroulante) pour choisir la devise From\n\t\tself.spn_montant = QtWidgets.QSpinBox() #spinbox (zone affichage) du montant a convertir\n\t\tself.cbb_devisesTo = QtWidgets.QComboBox() #cbb pour choisir la devise To\n\t\tself.spn_montantConverti = QtWidgets.QSpinBox() #spn du montant converti\n\t\tself.btn_inverser = QtWidgets.QPushButton(\"Inverser devises\") #bouton pour inverser les devises\n\t\t\n\t\t# AJOUT AU LAYOUT\n\t\tself.layout.addWidget(self.cbb_devisesFrom)\n\t\tself.layout.addWidget(self.spn_montant)\n\t\tself.layout.addWidget(self.cbb_devisesTo)\n\t\tself.layout.addWidget(self.spn_montantConverti)\n\t\tself.layout.addWidget(self.btn_inverser)", "def fill_ui(self):\n self.review_type_widget.set_review_type(self.review_type)\n\n if self.reviewer:\n self.reviewer_name_widget.setText(self.reviewer.name)\n\n if self.task:\n self.task_name_widget.setText(\n \"%s (%s) (%s)\"\n % (\n self.task.name,\n \" | \".join(\n [self.task.project.name]\n + [parent_task.name for parent_task in self.task.parents]\n ),\n self.task.id,\n )\n )\n\n # from stalker import Version\n # version = Version.query.filter(Version.task == self.task).order_by(Version.date_created.desc()).first()\n #\n # if version:\n # self.latest_version_widget.version = version", "def setup_ui(self):\n # Creation du layout\n self.layout = QtWidgets.QVBoxLayout(self)\n # Creation des widgets\n self.le_movieTitle = QtWidgets.QLineEdit()\n self.btn_addMovie = QtWidgets.QPushButton(\"Ajouter un film\")\n self.lw_movies = 
QtWidgets.QListWidget()\n self.lw_movies.setSelectionMode(QtWidgets.QListWidget.ExtendedSelection)\n self.btn_removeMovies = QtWidgets.QPushButton(\"Supprimer le(s) film(s)\")\n # Ajout des widgets dans le layout\n self.layout.addWidget(self.le_movieTitle)\n self.layout.addWidget(self.btn_addMovie)\n self.layout.addWidget(self.lw_movies)\n self.layout.addWidget(self.btn_removeMovies)", "def show(self):\n raise NotImplementedError", "def show(self):\n raise NotImplementedError", "def additional_ui(self):\n return '''<ui>\n <menubar name=\"MenuBar\">\n <menu action=\"FileMenu\">\n <placeholder name=\"LocalExport\">\n <menuitem action=\"ExportTab\"/>\n </placeholder>\n </menu>\n <menu action=\"BookMenu\">\n <placeholder name=\"AddEditBook\">\n <menuitem action=\"AddBook\"/>\n <menuitem action=\"EditBook\"/>\n </placeholder>\n </menu>\n <menu action=\"GoMenu\">\n <placeholder name=\"CommonGo\">\n <menuitem action=\"Back\"/>\n <menuitem action=\"Forward\"/>\n <separator/>\n </placeholder>\n </menu>\n <menu action=\"EditMenu\">\n <placeholder name=\"CommonEdit\">\n <menuitem action=\"Add\"/>\n <menuitem action=\"Edit\"/>\n <menuitem action=\"Remove\"/>\n <menuitem action=\"Merge\"/>\n </placeholder>\n <menuitem action=\"FilterEdit\"/>\n </menu>\n </menubar>\n <toolbar name=\"ToolBar\">\n <placeholder name=\"CommonNavigation\">\n <toolitem action=\"Back\"/> \n <toolitem action=\"Forward\"/> \n </placeholder>\n <placeholder name=\"CommonEdit\">\n <toolitem action=\"Add\"/>\n <toolitem action=\"Edit\"/>\n <toolitem action=\"Remove\"/>\n <toolitem action=\"Merge\"/>\n </placeholder>\n </toolbar>\n <popup name=\"Popup\">\n <menuitem action=\"Back\"/>\n <menuitem action=\"Forward\"/>\n <separator/>\n <menuitem action=\"Add\"/>\n <menuitem action=\"Edit\"/>\n <menuitem action=\"Remove\"/>\n <menuitem action=\"Merge\"/>\n <separator/>\n <menu name=\"QuickReport\" action=\"QuickReport\"/>\n </popup>\n </ui>'''", "def fromControls(self,widget):", "def showSettings(self):\n self.c.show()", "def _init_ui(self):\n hlayout = QtWidgets.QHBoxLayout()\n\n hlayout.addWidget(QtWidgets.QLabel('Kies een normtraject:'))\n\n self.section_combobox = QtWidgets.QComboBox()\n self.section_combobox.setFixedWidth(60)\n self._update_combobox()\n\n hlayout.addWidget(self.section_combobox)\n\n self.remove_button = QtWidgets.QPushButton('Verwijderen', clicked=self._del_flooddefence)\n hlayout.addWidget(self.remove_button)\n\n vlayout = QtWidgets.QVBoxLayout()\n vlayout.addLayout(hlayout)\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n\n vlayout.addWidget(line)\n\n self.close_button = QtWidgets.QPushButton('Sluiten', clicked=self.close)\n vlayout.addWidget(self.close_button, 0, QtCore.Qt.AlignRight)\n\n self.setLayout(vlayout)\n\n self.setWindowTitle(\"HB Havens: normtrajecten\")\n self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)", "def __init_ui(self, list: List[DiagramView], start_button: StartButtonView):\n minimize_button = QtWidgets.QPushButton()\n minimize_button.setIcon(QIcon(SystemInfo.RESOURCES + 'images/buttons/minimize.svg'))\n minimize_button.setFixedSize(31, 31)\n minimize_button.clicked.connect(self.__minimize_on_click)\n\n horizontal_layout = QHBoxLayout()\n for diagram in list:\n horizontal_layout.addWidget(diagram)\n\n central_layout = QGridLayout()\n central_widget = QtWidgets.QWidget()\n central_widget.setLayout(horizontal_layout)\n central_layout.addWidget(central_widget, 1, 0, 1, -1)\n 
central_layout.addWidget(start_button, 0, 1)\n central_layout.addWidget(minimize_button, 0, 2)\n\n self.setLayout(central_layout)\n\n self.setWindowFlags(Qt.CustomizeWindowHint)\n self.showMaximized()", "def _setupUi(self):\n self.setupUi(self)\n self.twTree.setStyleSheet(\"background-color: rgb(200, 200, 200)\")", "def setUp(self):\n self.theView = View()", "def additional_ui(self):\n return '''<ui>\n <menubar name=\"MenuBar\">\n <menu action=\"FileMenu\">\n <placeholder name=\"LocalExport\">\n <menuitem action=\"ExportTab\"/>\n </placeholder>\n </menu>\n <menu action=\"EditMenu\">\n <placeholder name=\"CommonEdit\">\n <menuitem action=\"Add\"/>\n <menuitem action=\"Edit\"/>\n <menuitem action=\"Remove\"/>\n <menuitem action=\"Merge\"/>\n </placeholder>\n <menuitem action=\"FilterEdit\"/>\n </menu>\n <menu action=\"BookMenu\">\n <placeholder name=\"AddEditBook\">\n <menuitem action=\"AddBook\"/>\n <menuitem action=\"EditBook\"/>\n </placeholder>\n </menu>\n <menu action=\"GoMenu\">\n <placeholder name=\"CommonGo\">\n <menuitem action=\"Back\"/>\n <menuitem action=\"Forward\"/>\n <separator/>\n </placeholder>\n </menu>\n </menubar>\n <toolbar name=\"ToolBar\">\n <placeholder name=\"CommonNavigation\">\n <toolitem action=\"Back\"/> \n <toolitem action=\"Forward\"/> \n </placeholder>\n <placeholder name=\"CommonEdit\">\n <toolitem action=\"Add\"/>\n <toolitem action=\"Edit\"/>\n <toolitem action=\"Remove\"/>\n <toolitem action=\"Merge\"/>\n </placeholder>\n <separator/>\n <toolitem action=\"OpenMedia\"/>\n </toolbar>\n <popup name=\"Popup\">\n <menuitem action=\"Back\"/>\n <menuitem action=\"Forward\"/>\n <separator/>\n <menuitem action=\"OpenMedia\"/>\n <menuitem action=\"OpenContainingFolder\"/>\n <separator/>\n <menuitem action=\"Add\"/>\n <menuitem action=\"Edit\"/>\n <menuitem action=\"Remove\"/>\n <menuitem action=\"Merge\"/>\n <separator/>\n <menu name=\"QuickReport\" action=\"QuickReport\"/>\n </popup>\n </ui>'''", "def _init_ui(self):\n\n hlayout = QtWidgets.QHBoxLayout()\n\n label = QtWidgets.QLabel('Kies een normtraject:')\n\n hlayout.addWidget(label)\n\n self.section_combobox = QtWidgets.QComboBox()\n self.section_combobox.setFixedWidth(60)\n self.section_ids = sorted([''] + io.geometry.import_section_ids(self.datadir))\n self.section_combobox.addItems(self.section_ids)\n\n hlayout.addWidget(self.section_combobox)\n\n self.add_button = QtWidgets.QPushButton('Toevoegen', clicked=self._add_flooddefence)\n\n hlayout.addWidget(self.add_button)\n\n vlayout = QtWidgets.QVBoxLayout()\n vlayout.addLayout(hlayout)\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n\n vlayout.addWidget(line)\n\n self.close_button = QtWidgets.QPushButton('Sluiten', clicked=self.close)\n vlayout.addWidget(self.close_button, 0, QtCore.Qt.AlignRight)\n\n self.setLayout(vlayout)\n\n self.setWindowTitle(\"HB Havens: normtrajecten\")\n self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)", "def _initUI(self):\n\n vlayout = QtWidgets.QVBoxLayout()\n\n # Description\n #----------------------------------------------------------------\n hlayout = QtWidgets.QHBoxLayout()\n\n label = QtWidgets.QLabel()\n label.setText('Locatie:')\n label.setFixedWidth(100)\n hlayout.addWidget(label)\n\n label = QtWidgets.QLabel()\n label.setText(self.name)\n hlayout.addWidget(label)\n hlayout.setSpacing(10)\n\n vlayout.addLayout(hlayout)\n\n # Exportnaam\n #----------------------------------------------------------------\n 
self.exportname = ParameterInputLine(label='Exportnaam:', labelwidth=100)\n self.exportname.LineEdit.setMinimumWidth(200)\n vlayout.addLayout(self.exportname.layout)\n\n # Exportdatabase\n #----------------------------------------------------------------\n self.exportpath = ExtendedLineEdit(label='SQLite-database:', labelwidth=100, browsebutton=True)\n self.exportpath.BrowseButton.clicked.connect(self._get_path_database)\n vlayout.addLayout(self.exportpath.layout)\n\n # Line\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n\n vlayout.addWidget(line)\n\n # Buttons\n #----------------------------------------------------------------\n hbox = QtWidgets.QHBoxLayout()\n hbox.addItem(QtWidgets.QSpacerItem(0, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum))\n # Add ok/close\n self.closebutton = QtWidgets.QPushButton('Sluiten')\n self.closebutton.clicked.connect(self.close)\n hbox.addWidget(self.closebutton)\n # Add ok/close\n self.savebutton = QtWidgets.QPushButton('Opslaan')\n self.savebutton.clicked.connect(self._save)\n hbox.addWidget(self.savebutton)\n\n vlayout.addLayout(hbox)\n\n # Add layout to widget\n self.setLayout(vlayout)", "def getControls(self):", "def __init__(self, parent=None):\n super(Inj, self).__init__(parent)\n self.setupUi(self)", "def user_interface_edit_options(cls):\n return", "def display_item_set(self):\n raise NotImplementedError()", "def showSettings():\n cq = dz()\n cq.abag()", "def _createView(self):\n\n items = []\n\n if self.showProgressBar:\n items.append(Item('progress', show_label=False,\n editor=ProgressEditor(callback=self._seek)))\n\n # Controls\n items.append(\n alignCenter(\n Item('backwardButton', style='custom',\n enabled_when='not object.running and object.mainloopRunning '\n +'and object.sensors and object.iteration > 1'),\n Item('runButton', style='custom',\n enabled_when='object.pause and not object.done'),\n Item('pauseButton', style='custom',\n enabled_when='not (object.pause or object.done)'),\n Item('stepButton', style='custom',\n enabled_when='object.pause and not object.done'),\n show_labels=False,\n orientation='horizontal'\n ))\n\n # Repeat button and pause target buttons\n items.append(\n alignCenter(\n Item('repeatButton', show_label=False,\n enabled_when='not object.running and object.mainloopRunning '\n 'and object.iteration > 0'),\n Item('nextTargetButton', show_label=False,\n editor=ButtonEditor(label_value='targetButtonLabel'),\n enabled_when='not object.running and object.mainloopRunning '\n 'and object.pauseTarget'),\n Item('customTargetButton', show_label=False,\n enabled_when='not object.running and object.mainloopRunning')\n ))\n\n # Speed control\n items.append(Item('speed', style='custom', show_label=False,\n editor=EnumEditor(cols=1, values={\n 1 : '1: Slow (update on every iteration)',\n 10 : '2: Medium (update every 10 iterations)',\n 100 : '3: Fast (update every 100 iterations)'\n })\n ))\n\n\n items.extend([\n Group(\n Item('pauseAtNextStep'),\n show_left=False\n ),\n alignLeft(\n Item('stopButton', show_label=False, enabled_when='object.iteration')\n )\n ])\n\n self.traits_view = View(*items)", "def set_view(self, s):\n #s.scene.reset_zoom()\n s.scene.z_plus_view()\n c = s.scene.camera\n c.azimuth(30)\n c.elevation(30)\n s.render()", "def initUI(self):\n\n lbl_names = ['Название проекта', 'Версия', 'Директория', 'Описание', 'Автор', 'Почта', 'Дополнительные зависимости', 'Название ноды']\n param_list = ['motor_driver', '0.0.0', 
'/home/mitya/catkin_ws/src/', 'The motor_driver package', 'D. Potapov',\n 'potapov627@yandex.ru', 'nav_msgs, geometry_msgs, tf, ', 'motor_driver_node']\n labels = []\n for name in lbl_names:\n labels.append(QLabel(name))\n for i, ph in zip(range(len(labels)), param_list):\n ed_line = QLineEdit()\n if i == 1:\n ed_line.setValidator(QRegExpValidator(QRegExp(\"^([0-9\\.])*[0-9]$\")))\n elif i == 5:\n ed_line.setValidator(QRegExpValidator(QRegExp(\"^([a-z0-9_-]+\\.)*[a-z0-9_-]+@[a-z0-9_-]+(\\.[a-z0-9_-]+)*\\.[a-z]{2,6}$\")))\n ed_line.setPlaceholderText(ph)\n if i != 0:\n ed_line.textEdited.connect(self.change_data)\n else:\n ed_line.textEdited.connect(self.change_pkg_name)\n self.full_ed_lines.append(ed_line)\n grid = QGridLayout()\n grid.setSpacing(5)\n for i in range(1, len(labels) + 1):\n for j in range(0, 2):\n if j == 0:\n grid.addWidget(labels[i - 1], i, j)\n else:\n grid.addWidget(self.full_ed_lines[i - 1], i, j)\n ch_dirButton = QPushButton(self)\n ch_dirButton.setIcon(QIcon('./icons/open_folder.png'))\n ch_dirButton.clicked.connect(self.ch_dirDialog)\n grid.addWidget(ch_dirButton, 3, 3)\n genButton = QPushButton(\"Сгенерировать\")\n genButton.clicked.connect(self.generate)\n grid.addWidget(genButton, len(labels) + 2, 1)\n self.setLayout(grid)\n self.setMinimumSize(700, 400)\n self.show()", "def setUIfromJSON(self):\n self.settingUI = True\n try:\n if self.params.has(\"name\"):\n self.name.setText(self.params.getString(\"name\"))\n if self.params.has(\"strategy\"):\n self.strategy.setSelectedItem(self.params.getString(\"strategy\"))\n if self.params.has(\"metagameStrategy\"):\n self.metagameStrategy.setSelectedItem(self.params.getString(\"metagameStrategy\"))\n if self.params.has(\"stateMachine\"):\n self.stateMachine.setSelectedItem(self.params.getString(\"stateMachine\"))\n if self.params.has(\"cacheStateMachine\"):\n self.cacheStateMachine.setSelected(self.params.getBoolean(\"cacheStateMachine\"))\n if self.params.has(\"maxPlys\"):\n self.maxPlys.getModel().setValue(self.params.getInt(\"maxPlys\"))\n if self.params.has(\"heuristicFocus\"):\n self.heuristicFocus.getModel().setValue(self.params.getInt(\"heuristicFocus\"))\n if self.params.has(\"heuristicMobility\"):\n self.heuristicMobility.getModel().setValue(self.params.getInt(\"heuristicMobility\"))\n if self.params.has(\"heuristicOpponentFocus\"):\n self.heuristicOpponentFocus.getModel().setValue(self.params.getInt(\"heuristicOpponentFocus\"))\n if self.params.has(\"heuristicOpponentMobility\"):\n self.heuristicOpponentMobility.getModel().setValue(self.params.getInt(\"heuristicOpponentMobility\"))\n if self.params.has(\"mcDecayRate\"):\n self.mcDecayRate.getModel().setValue(self.params.getInt(\"mcDecayRate\"))\n except JSONException as je:\n je.printStackTrace()\n finally:\n self.settingUI = False", "def openTB1Settings(self):\n self.TB1_Window = QtWidgets.QDialog()\n self.TB1_ui = Ui_robotOneConfig()\n self.TB1_ui.setupUi(self.TB1_Window)\n self.TB1_Window.show()", "def setupUi(self, MainWindowBase):\r\n Ui_MainWindow.setupUi(self, MainWindowBase)\r\n self.widget = MainWindowBase\r\n QObject.connect(self.actionAbout,SIGNAL(\"triggered()\"),self.openAbout)\r\n QObject.connect(self.actionFileOpen,SIGNAL(\"triggered()\"),self.openFile)\r\n self.statusBar().showMessage(self.tr(\"Ready\"))", "def iniciaUI(self):\n\n self.setGeometry(100,100, 250, 250)\n self.setWindowTitle(\"Login\")\n self.displayWidgets()\n\n self.show()", "def showUI(cls):\r\n win = cls(uiFile)\r\n win.create()\r\n return win", "def set_type_gui(self, index):\n # 
Regenerate the pages with the new values\n self.w_mag.setParent(None)\n self.w_mag = self.wid_list[index](self.machine)\n self.w_mag.saveNeeded.connect(self.emit_save)\n\n # Refresh the GUI\n self.main_layout.removeWidget(self.w_mag)\n self.main_layout.insertWidget(2, self.w_mag)", "def init_UI(self):\n # widgets\n self.modeComboBox = QtWidgets.QComboBox()\n self.updateBtn = QtWidgets.QPushButton('Force Update')\n self.pauseBtn = QtWidgets.QPushButton()\n self.pauseBtn.setCheckable(True)\n\n self.clearBtn = QtWidgets.QPushButton()\n self.clearBtn.setIcon(QtGui.QIcon(IconPaths.ICON_CLEAR_FILES))\n self.filterBtn = QtWidgets.QToolButton()\n self.filterBtn.setIcon(QtGui.QIcon(IconPaths.ICON_FILTER))\n self.filterBtn.setMinimumWidth(35)\n self.filterBtn.setStyleSheet('QToolButton::menu-indicator {subcontrol-position: center right; height: 7px}')\n self.filterBtn.setPopupMode(QtWidgets.QToolButton.InstantPopup)\n self.prefBtn = QtWidgets.QToolButton()\n self.prefBtn.setIcon(QtGui.QIcon(IconPaths.ICON_SETTINGS))\n #self.pBar = QtWidgets.QProgressBar()\n self.itemView = LocalizeView()\n self.itemView.setModel(self.proxy_model)\n self.autoScrollCB = QtWidgets.QCheckBox('Auto scroll to localizing files')\n self.autoScrollCB.setChecked(True)\n\n # tweak sizes so the widgets all line up vertically with Nuke's style\n self.modeComboBox.setMinimumHeight(self.updateBtn.sizeHint().height())\n self.pauseBtn.setMaximumSize(self.updateBtn.sizeHint())\n self.clearBtn.setMaximumSize(self.updateBtn.sizeHint())\n\n # mode menu\n self.modeLabel = QtWidgets.QLabel('Mode')\n self.modeComboBox.addItems(['On', 'Manual', 'Off'])\n\n # update menu\n self.updateMenu = QtWidgets.QMenu()\n self.act_forceUpdateAll = QtWidgets.QAction('All', self)\n self.act_forceUpdateSelectedNodes = QtWidgets.QAction('Selected', self)\n self.act_forceUpdateOnDemand = QtWidgets.QAction('On demand only', self) \n self.updateMenu.addAction(self.act_forceUpdateAll)\n self.updateMenu.addAction(self.act_forceUpdateSelectedNodes)\n self.updateMenu.addAction(self.act_forceUpdateOnDemand)\n self.updateBtn.setMenu(self.updateMenu)\n\n # clear menu\n self.clearMenu = QtWidgets.QMenu()\n self.clearMenu.addAction(QtWidgets.QAction('All local files', self, triggered=self.__delete_all_local_files))\n self.clearMenu.addAction(QtWidgets.QAction('Unused local files', self, triggered=_open_delete_dialog))\n self.clearBtn.setMenu(self.clearMenu)\n\n # filter menu\n self.filterMenu = QtWidgets.QMenu(self.filterBtn)\n self.act_filter_all = QtWidgets.QAction('All', self.filterMenu, checkable=True)\n self.act_filter_in_progress = QtWidgets.QAction('In Progress', self.filterMenu, checkable=True)\n self.act_filter_up_to_date = QtWidgets.QAction('Up to date', self.filterMenu, checkable=True)\n self.act_filter_out_of_date = QtWidgets.QAction('Out of date', self.filterMenu, checkable=True)\n self.act_filter_from_source = QtWidgets.QAction('Reading from source', self.filterMenu, checkable=True)\n self.act_filter_disabled = QtWidgets.QAction('Disabled', self.filterMenu, checkable=True)\n self.act_filter_not_localized = QtWidgets.QAction('Not Localized', self.filterMenu, checkable=True)\n\n self.act_filter_in_progress.setData(Status.IN_PROGRESS)\n self.act_filter_up_to_date.setData(Status.UP_TO_DATE)\n self.act_filter_out_of_date.setData(Status.OUT_OF_DATE)\n self.act_filter_from_source.setData(Status.READ_FROM_SOURCE)\n self.act_filter_disabled.setData(Status.DISABLED)\n self.act_filter_not_localized.setData(Status.NOT_LOCALIZED)\n for act in (self.act_filter_all, 
self.act_filter_in_progress, self.act_filter_up_to_date, self.act_filter_out_of_date,\n self.act_filter_from_source, self.act_filter_disabled, self.act_filter_not_localized):\n self.filterMenu.addAction(act)\n self.filterBtn.setMenu(self.filterMenu)\n\n # tooltips\n self.modeComboBox.setToolTip('Sets the global localization mode.\\nThis is the same as using the options in the Cache/Localization/Mode menu.')\n self.updateBtn.setToolTip('Forces the update of localized files.\\nThis is the same as using the options in the Cache/Localization/Force Update menu.')\n self.pauseBtn.setToolTip('Pauses/Resumes file localization.\\nThis is the same as Cache/Localization/Pause.')\n self.clearBtn.setToolTip('''Allows for clearing localized files.\\nTwo modes are supported:\n \"All local files\" - this will delete all files in {}\n \"Unused local files\" - this will only delete unused local files (same as Cache/Localization/Clear Unused Local Files)'''.format(nuke.toNode('preferences')['localCachePath'].evaluate()))\n self.filterBtn.setToolTip('Sets a view filter the table.')\n self.prefBtn.setToolTip('Open the preferences.')\n\n # layouts\n layout = QtWidgets.QVBoxLayout()\n btnLayout = QtWidgets.QHBoxLayout()\n btnLayout.addWidget(self.modeLabel)\n btnLayout.addWidget(self.modeComboBox)\n btnLayout.addWidget(self.updateBtn)\n btnLayout.addWidget(self.pauseBtn)\n btnLayout.addWidget(self.clearBtn)\n btnLayout.addStretch()\n btnLayout.addWidget(self.filterBtn)\n btnLayout.addWidget(self.prefBtn)\n layout.addLayout(btnLayout)\n #layout.addWidget(self.pBar)\n layout.addWidget(self.itemView)\n layout.addWidget(self.autoScrollCB)\n layout.setAlignment(self.autoScrollCB, QtCore.Qt.AlignRight)\n self.setLayout(layout)", "def set_view(self, s):\n #s.scene.reset_zoom()\n s.scene.z_plus_view()\n c = s.scene.camera\n c.azimuth(-30)\n c.elevation(20)\n s.render()", "def skeletonSettings_UI(self, name, width, height, checkable):\n\n # add the groupbox for this module with the module name and module settings\n self.groupBox = QtWidgets.QGroupBox(name)\n self.groupBox.setGeometry(QtCore.QRect(0, 0, width, height))\n self.groupBox.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed))\n\n if not checkable:\n self.groupBox.setMinimumSize(QtCore.QSize(width, height))\n\n if checkable:\n self.groupBox.setMinimumSize(QtCore.QSize(width, 0))\n\n self.groupBox.setMaximumSize(QtCore.QSize(width, height))\n self.groupBox.setFlat(True)\n self.groupBox.setCheckable(checkable)\n\n self.lockButton = QtWidgets.QPushButton()\n self.lockButton.setMinimumSize(QtCore.QSize(20, 20))\n self.lockButton.setMaximumSize(QtCore.QSize(20, 20))\n\n # load style sheet file\n styleSheetFile = utils.returnNicePath(self.toolsPath,\n \"Core/Scripts/Interfaces/StyleSheets/skeletonSettings.qss\")\n f = open(styleSheetFile, \"r\")\n style = f.read()\n f.close()\n\n self.groupBox.setStyleSheet(style)\n\n # set properties for filtering later\n self.groupBox.setObjectName(name)\n self.groupBox.setProperty(\"name\", name)\n\n # set context menu policy on groupbox\n self.groupBox.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n self.groupBox.customContextMenuRequested.connect(self.createContextMenu)", "def _setupUi(self, widget):\n \n widget._setup_vertical_layout()\n widget._setup_horizontal_layout()\n widget._setup_vertical_layout()\n for field in self._fields:\n if field=='channel_idx':\n widget._exit_layout()\n widget._setup_vertical_layout()\n choices = None\n if hasattr(self, field + 's'):\n choices = 
self.__getattribute__(field + 's')\n widget._setup_gui_element(field, choices)\n widget._exit_layout()\n widget._exit_layout()\n self._setup_fetch_buttons(widget)", "def __init__(self, parent=None):\n super(union_Dialog, self).__init__(parent)\n self.setupUi(self)", "def init_ui(self):\n self.master.title(\"Backbone\")\n self.master.geometry(\"300x150\")\n\n self.pack(fill=BOTH, expand=1)\n\n self.btn_upload_file = Button(self, text=\"Upload file\", command=self.upload_file)\n self.btn_upload_file.place(x=90, y=10)\n\n self.btn_create_training_file = Button(self, text=\"Create & upload training file\",\n command=self.create_training_file)\n self.btn_create_training_file.place(x=30, y=40)\n\n self.btn_run_algorithm = Button(self, text=\"Run algorithm\", command=self.run_algorithm)\n self.btn_run_algorithm.place(x=80, y=70)\n\n self.btn_view_results = Button(self, text=\"View Results\", command=self.view_results)\n self.btn_view_results.place(x=85, y=100)", "def showUI(cls):\r\n win = cls()\r\n win.create()\r\n return win", "def _setup_ui(self):\n\n self.window = ui.Widget()\n self.window.dimensions = ui.normalize_dimension((\n 0, 0,\n self.normalized_screen_resolution[0],\n self.normalized_screen_resolution[1]\n ))\n self.window.background_color = ImageColor.getcolor('#000000', 'RGB')\n\n interface_frame = ui.Widget(parent=self.window)\n interface_frame.dimensions = ui.normalize_dimension((\n self.preview_renderer.window[2],\n 0,\n self.normalized_screen_resolution[0] - self.preview_renderer.window[2],\n self.normalized_screen_resolution[1]\n ))\n interface_frame.background_color = ImageColor.getcolor('#ffffff', 'RGB')\n\n number = ui.LabelWidget(\"\",\n name=NAME_GET_STARTED,\n parent=interface_frame,\n align=\"center\",\n font_color=(0, 0, 0, 255))\n number.dimensions = (\n 5, 5,\n interface_frame.width - 10,\n interface_frame.height - 10\n )", "def createMirrorOfModule_UI(self):\n\n # copy the settings of the module\n self.copySettings()\n\n # get basename and classname\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n className = cmds.getAttr(networkNode + \".moduleType\")\n\n # launch a UI to get the name information\n self.mirrorWindow = QtWidgets.QMainWindow()\n\n # load stylesheet\n styleSheetFile = utils.returnNicePath(self.toolsPath, \"Core/Scripts/Interfaces/StyleSheets/mainScheme.qss\")\n f = open(styleSheetFile, \"r\")\n style = f.read()\n f.close()\n\n self.mirrorWindow.setStyleSheet(style)\n\n # size policies\n mainSizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n\n # create the main widget\n self.mainWidget = QtWidgets.QWidget()\n self.mirrorWindow.setCentralWidget(self.mainWidget)\n\n # set qt object name\n self.mirrorWindow.setObjectName(\"ART_createMirrorModuleUI\")\n self.mirrorWindow.setWindowTitle(\"Create Mirror Module\")\n\n # create the mainLayout for the rig creator UI\n self.mainLayout = QtWidgets.QVBoxLayout(self.mainWidget)\n self.mainLayout.setContentsMargins(0, 0, 0, 0)\n\n self.mirrorWindow.resize(300, 150)\n self.mirrorWindow.setSizePolicy(mainSizePolicy)\n self.mirrorWindow.setMinimumSize(QtCore.QSize(300, 150))\n self.mirrorWindow.setMaximumSize(QtCore.QSize(300, 150))\n\n # create the background image\n self.frame = QtWidgets.QFrame()\n self.mainLayout.addWidget(self.frame)\n\n # create the layout for the widgets\n self.widgetLayout = QtWidgets.QVBoxLayout(self.frame)\n\n # create the prefix pair of fields\n self.prefixForm = QtWidgets.QFormLayout()\n 
self.widgetLayout.addLayout(self.prefixForm)\n\n self.prefixLabel = QtWidgets.QLabel(\"Prefix: \")\n self.prefixForm.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.prefixLabel)\n\n self.prefix = QtWidgets.QLineEdit()\n self.prefixForm.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.prefix)\n\n # hookup signal/slot connection\n self.prefix.textChanged.connect(partial(self.updatePreview, baseName))\n\n # create the suffix pair of fields\n self.suffixForm = QtWidgets.QFormLayout()\n self.widgetLayout.addLayout(self.suffixForm)\n\n self.suffixLabel = QtWidgets.QLabel(\"Suffix: \")\n self.suffixForm.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.suffixLabel)\n\n self.suffix = QtWidgets.QLineEdit()\n self.suffixForm.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.suffix)\n\n # hookup signal/slot connection\n self.suffix.textChanged.connect(partial(self.updatePreview, baseName))\n\n # spacer\n spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n self.widgetLayout.addItem(spacerItem)\n\n # realtime preview of final module name\n self.previewForm = QtWidgets.QFormLayout()\n self.widgetLayout.addLayout(self.previewForm)\n self.previewLabel = QtWidgets.QLabel(\"Preview: \")\n self.previewName = QtWidgets.QLabel(baseName)\n self.previewName.setMinimumSize(QtCore.QSize(200, 20))\n self.previewName.setMaximumSize(QtCore.QSize(200, 20))\n self.previewName.setAlignment(QtCore.Qt.AlignHCenter)\n self.previewForm.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.previewLabel)\n self.previewForm.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.previewName)\n\n # set preview font\n font = QtGui.QFont()\n font.setPointSize(12)\n self.previewName.setFont(font)\n\n spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n self.widgetLayout.addItem(spacerItem1)\n\n # create button\n self.createButton = QtWidgets.QPushButton(\"CREATE\")\n self.createButton.setObjectName(\"blueButton\")\n self.widgetLayout.addWidget(self.createButton)\n self.createButton.setMinimumSize(QtCore.QSize(285, 40))\n self.createButton.setMaximumSize(QtCore.QSize(285, 40))\n self.createButton.setSizePolicy(mainSizePolicy)\n font = QtGui.QFont()\n font.setPointSize(12)\n self.createButton.setFont(font)\n\n # hookup signal/slot on create button\n self.createButton.clicked.connect(self.createMirrorModule)\n\n # show the window\n self.mirrorWindow.show()", "def update_view(self): \n raise NotImplementedError(\"Widget descendents MUST implement the update_view() method!\")", "def showGUI(self,**kwargs):\n self.baxter.menu.select(self.modes[0])", "def _ui_content(self):\n\n # Cleat the tree\n self.clear()\n\n # Set the font\n font = QtGui.QFont()\n font.setPointSize(11)\n\n # Add the id sets and set items\n for id_set, id_dict in sorted(self.scnData.items()):\n tree_item = QtGui.QTreeWidgetItem(self)\n\n tree_item.setText(0, id_set)\n tree_item.setFont(0, font)\n\n icon_folder = os.path.dirname(os.path.abspath(__file__))\n icon_path = os.path.join(icon_folder, \"icons\", \"IdSet.png\")\n\n tree_item.setIcon(0, QtGui.QIcon(icon_path))\n\n tree_item.setData(0, QtCore.Qt.UserRole, \"set\")\n tree_item.setData(1, QtCore.Qt.UserRole, id_set)\n\n for id_color, id_objects in sorted(id_dict.items()):\n if id_color != \"Holdout\":\n self._add_id_color(id_objects,\n id_color,\n tree_item)\n\n self._add_id_color(id_dict[\"Holdout\"], \"Holdout\", tree_item)\n\n return", "def __call__(self):\n self.show()", "def update_view(self, 
selected):\n pass", "def add_views_widget(self):\n axial_view = QtWidgets.QPushButton(\"Axial\")\n coronal_view = QtWidgets.QPushButton(\"Coronal\")\n sagittal_view = QtWidgets.QPushButton(\"Sagittal\")\n views_box = QtWidgets.QGroupBox(\"Views\")\n views_box_layout = QtWidgets.QVBoxLayout()\n views_box_layout.addWidget(axial_view)\n views_box_layout.addWidget(coronal_view)\n views_box_layout.addWidget(sagittal_view)\n views_box.setLayout(views_box_layout)\n self.grid.addWidget(views_box, 3, 0, 2, 2)\n axial_view.clicked.connect(self.set_axial_view)\n coronal_view.clicked.connect(self.set_coronal_view)\n sagittal_view.clicked.connect(self.set_sagittal_view)", "def _initUI(self):\n\n self.setWindowTitle(\"HB Havens: onzekerheden\")\n self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)\n\n hlayout = QtWidgets.QHBoxLayout()\n\n vlayout = QtWidgets.QVBoxLayout()\n\n # Radio buttons\n #----------------------------------------------------------------\n self.button1 = QtWidgets.QRadioButton('Onzekerheden uit steunpunt overnemen')\n self.button2 = QtWidgets.QRadioButton('Onzekerheden uit havenmodel overnemen')\n self.button3 = QtWidgets.QRadioButton('Combinatie van bovenstaande gebruiken')\n\n vlayout.addWidget(self.button1)\n vlayout.addWidget(self.button2)\n vlayout.addWidget(self.button3)\n vlayout.addItem(QtWidgets.QSpacerItem(0, 0, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Expanding))\n\n hlayout.addLayout(vlayout)\n\n vlayout = QtWidgets.QVBoxLayout()\n # Model uncertainties support location\n #----------------------------------------------------------------\n label = QtWidgets.QLabel()\n label.setText('Modelonzekerheden in steunpunt:')\n vlayout.addWidget(label)\n\n self.supportloc_unc_table = widgets.DataFrameWidget(self.supportloc_unc)\n self.supportloc_unc_table.fixed_fit_to_content(90)\n vlayout.addWidget(self.supportloc_unc_table)\n\n label = QtWidgets.QLabel()\n label.setText('Modelonzekerheden in havenmodel (zelf invullen):')\n vlayout.addWidget(label)\n\n self.harbor_unc_table = widgets.DataFrameWidget(self.harbor_unc, editing_enabled=True)\n self.harbor_unc_table.fixed_fit_to_content(90)\n vlayout.addWidget(self.harbor_unc_table)\n\n label = QtWidgets.QLabel()\n label.setText('Gecombineerde modelonzekerheid (berekenen):')\n vlayout.addWidget(label)\n\n calc_button = QtWidgets.QPushButton('Berekenen')\n calc_button.clicked.connect(self._calc_combined_uncertainty)\n vlayout.addWidget(calc_button)\n\n self.combined_unc_table = widgets.DataFrameWidget(self.combined_unc)\n self.combined_unc_table.fixed_fit_to_content(90)\n vlayout.addWidget(self.combined_unc_table)\n\n for table in [self.supportloc_unc_table, self.harbor_unc_table, self.combined_unc_table]:\n table.setShowGrid(True)\n table.setAlternatingRowColors(False)\n\n hlayout.addLayout(vlayout)\n\n vlayout = QtWidgets.QVBoxLayout()\n vlayout.addLayout(hlayout)\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n vlayout.addWidget(line)\n\n\n # Buttons\n #----------------------------------------------------------------\n hbox = QtWidgets.QHBoxLayout()\n hbox.addItem(QtWidgets.QSpacerItem(0, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum))\n # Add ok/close\n self.closebutton = QtWidgets.QPushButton('Sluiten')\n self.closebutton.clicked.connect(self.close)\n hbox.addWidget(self.closebutton)\n # Add ok/close\n self.savebutton = QtWidgets.QPushButton('Opslaan')\n self.savebutton.clicked.connect(self._save)\n 
hbox.addWidget(self.savebutton)\n\n vlayout.addLayout(hbox)\n\n # Add layout to widget\n self.setLayout(vlayout)\n self.layout().setSpacing(10)", "def setupUi(self, obj):\n obj.layout = QVBoxLayout()\n obj.setLayout(obj.layout)\n\n obj.bttn_cam_connect = QPushButton(\"Connect to Jet Cam\")\n obj.bttn_cam_disconnect = QPushButton(\"Disconnect\")\n obj.bttn_cam_calibrate = QPushButton(\"Calibrate\")\n obj.bttngrp1 = QButtonGroup()\n obj.rd_bttn_com_off = QRadioButton(\"COM detection off\")\n obj.rd_bttn_com_off.setChecked(True)\n obj.rd_bttn_com_on = QRadioButton(\"COM detection on\")\n obj.bttngrp1.addButton(obj.rd_bttn_com_off, id=0)\n obj.bttngrp1.addButton(obj.rd_bttn_com_on, id=1)\n obj.bttngrp1.setExclusive(True)\n\n obj.lbl_morph = QLabel(\"Morphological Operations\")\n\n obj.lbl_dilate = QLabel(\"Dilate edges\")\n obj.slider_dilate = QSlider(Qt.Horizontal)\n obj.slider_dilate.setMinimum(0)\n obj.slider_dilate.setMaximum(10)\n obj.slider_dilate.setTickPosition(QSlider.TicksBelow)\n obj.slider_dilate.setTickInterval(1)\n\n obj.rd_bttn_dilate = QRadioButton(\"Dilate On/Off\")\n obj.rd_bttn_dilate.setChecked(True)\n obj.rd_bttn_dilate.setAutoExclusive(False)\n\n obj.lbl_erode = QLabel(\"Erode edges\")\n obj.slider_erode = QSlider(Qt.Horizontal)\n obj.slider_erode.setMinimum(0)\n obj.slider_erode.setMaximum(10)\n obj.slider_erode.setTickPosition(QSlider.TicksBelow)\n obj.slider_erode.setTickInterval(1)\n\n obj.rd_bttn_erode = QRadioButton(\"Erode On/Off\")\n obj.rd_bttn_erode.setAutoExclusive(False)\n\n obj.lbl_open_close = QLabel(\"Open/Close\\nOpening is Erosion followed \"\n \"by Dilation,\\nit is good for removing \"\n \"small blobs from an image (remove salt \"\n \"noise).\\nClose is Dilation followed by \"\n \"Erosion,\\nit is good for closing holes \"\n \"inside of objects (remove pepper noise)\"\n \"\\n\\n\")\n obj.lbl_open = QLabel(\"Open\")\n obj.slider_open = QSlider(Qt.Horizontal)\n obj.slider_open.setMinimum(0)\n obj.slider_open.setMaximum(10)\n obj.slider_open.setTickPosition(QSlider.TicksBelow)\n obj.slider_open.setTickInterval(1)\n\n obj.rd_bttn_open = QRadioButton(\"Open On/Off\")\n obj.rd_bttn_open.setChecked(True)\n obj.rd_bttn_open.setAutoExclusive(False)\n\n obj.lbl_close = QLabel(\"Close\")\n obj.slider_close = QSlider(Qt.Horizontal)\n obj.slider_close.setMinimum(0)\n obj.slider_close.setMaximum(10)\n obj.slider_close.setTickPosition(QSlider.TicksBelow)\n obj.slider_close.setTickInterval(1)\n\n obj.rd_bttn_close = QRadioButton(\"Close On/Off\")\n obj.rd_bttn_close.setAutoExclusive(False)\n\n obj.lbl_brightness = QLabel(\"Brightness\")\n obj.slider_brightness = QSlider(Qt.Horizontal)\n\n obj.lbl_contrast = QLabel(\"Contrast\")\n obj.slider_contrast = QSlider(Qt.Horizontal)\n\n obj.lbl_blur = QLabel(\"Blur\")\n obj.slider_blur = QSlider(Qt.Horizontal)\n\n obj.lbl_thresh = QLabel(\"Threshold\")\n obj.range_slider_thresh = QRangeSlider(obj, left_thumb_value=110)\n\n obj.bttn_search = QPushButton(\"Search for Jet\")\n obj.bttn_reset_all = QPushButton(\"Reset All Image Morphologies\")\n\n obj.text_area = QTextEdit(\"~~~read only information for user~~~\")\n obj.text_area.setReadOnly(True)\n\n obj.layout_cam1 = QHBoxLayout()\n obj.layout_cam1.addWidget(obj.bttn_cam_connect)\n\n obj.layout_cam2 = QHBoxLayout()\n obj.layout_cam2.addWidget(obj.bttn_cam_disconnect)\n\n obj.layout_com = QHBoxLayout()\n obj.layout_com.addWidget(obj.rd_bttn_com_off)\n obj.layout_com.addWidget(obj.rd_bttn_com_on)\n\n obj.layout_dilate = QHBoxLayout()\n 
obj.layout_dilate.addWidget(obj.slider_dilate)\n obj.layout_dilate.addWidget(obj.rd_bttn_dilate)\n\n obj.layout_erode = QHBoxLayout()\n obj.layout_erode.addWidget(obj.slider_erode)\n obj.layout_erode.addWidget(obj.rd_bttn_erode)\n\n obj.layout_close = QHBoxLayout()\n obj.layout_close.addWidget(obj.slider_close)\n obj.layout_close.addWidget(obj.rd_bttn_close)\n\n obj.layout_open = QHBoxLayout()\n obj.layout_open.addWidget(obj.slider_open)\n obj.layout_open.addWidget(obj.rd_bttn_open)\n\n obj.layout_thresh = QHBoxLayout()\n obj.layout_thresh.addWidget(obj.lbl_thresh)\n obj.layout_thresh.addWidget(obj.range_slider_thresh)\n\n obj.layout_blur = QHBoxLayout()\n obj.layout_blur.addWidget(obj.lbl_blur)\n obj.layout_blur.addWidget(obj.slider_blur)\n\n obj.layout_brightness = QHBoxLayout()\n obj.layout_brightness.addWidget(obj.lbl_brightness)\n obj.layout_brightness.addWidget(obj.slider_brightness)\n\n obj.layout_contrast = QHBoxLayout()\n obj.layout_contrast.addWidget(obj.lbl_contrast)\n obj.layout_contrast.addWidget(obj.slider_contrast)\n\n obj.layout_bttns = QVBoxLayout()\n obj.layout_bttns.addWidget(obj.bttn_search)\n obj.layout_bttns.addWidget(obj.bttn_reset_all)\n\n obj.layout.addStretch()\n obj.layout.addLayout(obj.layout_cam1)\n obj.layout.addLayout(obj.layout_cam2)\n obj.layout.addWidget(obj.bttn_cam_calibrate)\n\n obj.layout.addLayout(obj.layout_com)\n\n obj.hline0 = QHLine()\n obj.layout.addWidget(obj.hline0)\n obj.layout.addWidget(obj.lbl_dilate)\n obj.layout.addLayout(obj.layout_dilate)\n obj.hline1 = QHLine()\n obj.layout.addWidget(obj.hline1)\n obj.layout.addWidget(obj.lbl_erode)\n obj.layout.addLayout(obj.layout_erode)\n obj.hline2 = QHLine()\n obj.layout.addWidget(obj.hline2)\n obj.layout.addWidget(obj.lbl_open)\n obj.layout.addLayout(obj.layout_open)\n obj.hline3 = QHLine()\n obj.layout.addWidget(obj.hline3)\n obj.layout.addWidget(obj.lbl_close)\n obj.layout.addLayout(obj.layout_close)\n # obj.hline4 = QHLine()\n # obj.layout.addWidget(obj.hline4)\n # obj.layout.addWidget(obj.lbl_open)\n # obj.layout.addWidget(obj.slider_open)\n obj.hline5 = QHLine()\n obj.layout.addWidget(obj.hline5)\n obj.layout.addLayout(obj.layout_brightness)\n obj.layout.addLayout(obj.layout_contrast)\n obj.hline6 = QHLine()\n obj.layout.addWidget(obj.hline6)\n obj.layout.addLayout(obj.layout_blur)\n obj.hline7 = QHLine()\n obj.layout.addWidget(obj.hline7)\n obj.layout.addLayout(obj.layout_thresh)\n obj.hline8 = QHLine()\n obj.layout.addWidget(obj.hline8)\n obj.layout.addLayout(obj.layout_bttns)\n obj.layout.addStretch()\n obj.hline9 = QHLine()\n obj.layout.addWidget(obj.hline9)\n obj.layout.addWidget(obj.text_area)\n obj.layout.addStretch()", "def __setup_ui(self):\n self.pixel_label = QLabel(\"\", self)\n self.pixel_label.setFixedWidth(100)\n self.pixel_coords_label = QLabel(\"\", self)\n self.statusBar().addPermanentWidget(self.pixel_coords_label)\n self.statusBar().addPermanentWidget(self.pixel_label)\n\n self.current_fps_label = QLabel(\"\", self)\n self.statusBar().addPermanentWidget(self.current_fps_label)\n\n self.toolbar = self.addToolBar(\"default\")\n self.toolbar.setMovable(False)\n self.setContextMenuPolicy(Qt.NoContextMenu)\n\n exit_act = QAction(QIcon.fromTheme('exit'), 'Exit', self)\n exit_act.setShortcut('Ctrl+Q')\n exit_act.setStatusTip(\"Exit application\")\n exit_act.triggered.connect(self.app.quit)\n self.toolbar.addAction(exit_act)\n\n preferences_action = QAction(QIcon.fromTheme(\"preferences-desktop\"),\n \"Preferences\", self)\n preferences_action.setStatusTip(\"Open preferences 
dialog\")\n preferences_action.triggered.connect(self.open_preferences)\n self.toolbar.addAction(preferences_action)\n\n self.device_label = QLabel(\"Device:\")\n self.device_combo = QComboBox(self)\n self.device_combo.setSizeAdjustPolicy(QComboBox.AdjustToContents)\n # self.device_combo.setMinimumWidth(300)\n self.device_combo.activated[str].connect(self.on_device_selected)\n self.toolbar.addWidget(self.device_label)\n self.toolbar.addWidget(self.device_combo)\n\n self.format_label = QLabel(\"Format:\")\n self.format_combo = QComboBox(self)\n self.format_combo.setSizeAdjustPolicy(QComboBox.AdjustToContents)\n self.format_combo.setMinimumWidth(150)\n self.format_combo.activated[str].connect(self.on_format_selected)\n self.toolbar.addWidget(self.format_label)\n self.toolbar.addWidget(self.format_combo)\n\n self.resolution_label = QLabel(\"Resolution:\")\n self.resolution_combo = TcamComboBox(self, \"Select Resolution\")\n self.resolution_combo.setSizeAdjustPolicy(QComboBox.AdjustToContents)\n self.resolution_combo.activated[str].connect(self.on_resolution_selected)\n self.toolbar.addWidget(self.resolution_label)\n self.toolbar.addWidget(self.resolution_combo)\n\n self.fps_label = QLabel(\"FPS:\")\n self.fps_combo = TcamComboBox(self, \"Select FPS:\")\n self.fps_combo.setSizeAdjustPolicy(QComboBox.AdjustToContents)\n self.fps_combo.activated[str].connect(self.on_fps_selected)\n self.toolbar.addWidget(self.fps_label)\n self.toolbar.addWidget(self.fps_combo)\n\n self.save_image = QAction(\"Save Image\", self)\n self.save_image.setIcon(QIcon.fromTheme(\"insert-image\"))\n\n self.save_image.triggered.connect(self.save_image_action)\n self.toolbar.addAction(self.save_image)\n\n self.fit_to_win = QAction(\"Fit To Window\", self)\n self.fit_to_win.setIcon(QIcon.fromTheme(\"zoom-fit-best\"))\n\n self.fit_to_win.triggered.connect(self.fit_to_window)\n self.toolbar.addAction(self.fit_to_win)\n\n self.props_action = QAction(\"\", self)\n self.props_action.setText(\"Properties\")\n self.props_action.setVisible(False)\n self.props_action.triggered.connect(self.toggle_properties_dialog)\n self.toolbar.addAction(self.props_action)\n\n self.recording_action = QAction(\"\", self)\n self.recording_action.setIcon(QIcon.fromTheme(\"media-record\"))\n self.recording_action.setIconText(\"Start recording\")\n self.recording_action.setText(\"Start recording\")\n self.recording_action.triggered.connect(self.start_recording_video)\n self.toolbar.addAction(self.recording_action)\n\n self.set_device_menus_enabled(False)\n\n self.view = None", "def init_ui(self):\n\n # Display default values until first update\n self.window.pcBatteryDisplay.setValue(100)\n self.window.pcCpuDisplay.setValue(0)", "def create_widgets( self ):", "def ui_setup(self):\n loader = QUiLoader()\n file = QFile('./user_interface/form/main_window.ui')\n file.open(QFile.ReadOnly)\n self._window = loader.load(file)\n file.close()\n\n status_bar = QStatusBar(self._window)\n status_bar.showMessage(__copyright__)\n self._window.setStatusBar(status_bar)\n self._window.setWindowIcon(QIcon('./user_interface/media/bucketing_icon.jpeg'))\n self._window.setWindowTitle('PySide2 Project - Basic UI Framework')\n\n self._option_panel = OptionPanel()\n self._option_panel.add_button('DekBan', './user_interface/media/dekban.png')\n self._option_panel.add_button('Charlie', './user_interface/media/charlie.jpeg')\n self._option_panel.add_button('Simon', './user_interface/media/Simon.jpeg')\n\n # Add widget to main layout\n main_layout = self._window.main_layout\n 
main_layout.itemAtPosition(0, 0).setAlignment(QtCore.Qt.AlignCenter)\n main_layout.itemAtPosition(0, 1).setAlignment(QtCore.Qt.AlignVCenter)\n main_layout.addWidget(self._option_panel, 2, 0, 1, 1)\n\n # Add page widget to stack\n self._pages['item'] = ItemWidget()\n self._pages['text1'] = TextPage(text=PAUSE_TEXT)\n self._pages['text2'] = TextPage(text=STOP_TEXT)\n\n for index, name in enumerate(self._pages):\n print('pages {} : {} page'.format(index, name))\n self._window.widget_stack.addWidget(self._pages[name].widget)\n\n self._window.widget_stack.setCurrentIndex(0)\n\n # Build up signal / slot\n self._option_panel.currentItemChanged.connect(self.set_page)" ]
[ "0.7031982", "0.6457756", "0.6457756", "0.6440338", "0.64161724", "0.63023686", "0.6291685", "0.6243074", "0.61494553", "0.61151314", "0.6093119", "0.6090017", "0.6085131", "0.60317594", "0.60222656", "0.60190624", "0.60181403", "0.6016506", "0.5963193", "0.59621996", "0.5954504", "0.59279686", "0.5922433", "0.5894824", "0.5876441", "0.5863867", "0.58485997", "0.5809992", "0.5808063", "0.5794273", "0.57935405", "0.5791238", "0.5791238", "0.5791238", "0.5773656", "0.5765092", "0.57593066", "0.57511884", "0.5745173", "0.5736303", "0.5732973", "0.5723987", "0.5720827", "0.5720148", "0.571727", "0.57156205", "0.56906444", "0.56901467", "0.56901366", "0.56787515", "0.5665164", "0.5637824", "0.56345505", "0.56318045", "0.56318045", "0.5624017", "0.56157255", "0.56132066", "0.5612727", "0.56060606", "0.5598768", "0.5587861", "0.55700564", "0.5564186", "0.5552537", "0.5540682", "0.5533996", "0.553053", "0.55273026", "0.5525713", "0.55161965", "0.55123985", "0.5505809", "0.55046403", "0.55031437", "0.5500546", "0.55004287", "0.54889923", "0.54813886", "0.5480396", "0.5475115", "0.54633635", "0.545886", "0.54557866", "0.5445565", "0.54405063", "0.54339826", "0.5422483", "0.5421139", "0.5417216", "0.54170656", "0.5414859", "0.54128563", "0.54090714", "0.5404635", "0.54036653", "0.5396462", "0.53956944", "0.5388516", "0.5380191" ]
0.7076183
0
Page of general settings
def general(request): return render(request, 'general.html', {})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def printSettings():\n print \">>>\\n>>> SettingsTool: global variables:\"\n for variable, value in globals().items():\n if variable.count('__')>1: continue\n print \">>> %-16s = %s\"%(variable,value)\n print \">>>\"", "def showSettings():\n cq = dz()\n cq.abag()", "def settings_view():\n return template('settings.html')", "def generalHelp(self):\n rf = os.path.join('docs','helpButtons','prefsGeneral.html')\n self.showHelpFile( rf )", "def setting():\n return render_template('setting.html', year=datetime.now().year)", "def load_settings(self):\n\n self.std = settings.settings", "def getNewDefaultWikiSettingsPage(mainControl):\r\n return _(u\"\"\"++ Wiki Settings\r\n\r\nThese are your default global settings.\r\n\r\n[global.importance.low.color: grey]\r\n[global.importance.high.bold: true]\r\n[global.contact.icon: contact]\r\n[global.wrap: 70]\r\n\r\n[icon: cog]\r\n\"\"\") # TODO Localize differently?\r", "def settings(self):\n return {}", "def settings(request):\n from django.conf import settings\n return {\n 'exhibitBaseTemplate': settings.EXHIBIT_TEMPLATE,\n 'thumbnailUrl': settings.THUMBNAIL_URL,\n 'calisphere': settings.CALISPHERE\n }", "def settings(request):\n return {\"SETTINGS\": django_settings, \"GLOBAL_DEFINITIONS\": global_definitions}", "async def settings(self, ctx: BBContext):\n pass", "def settings( self, selection ):\r\n if( self.__optionsDatabase.showOptionsDatabase() ):\r\n self.main( selection )", "def check_general_settings(self):\n\n self.generate_description_general_settings()\n if not self.config.has_section('description'):\n colorprint('[description] in [{self.settingpath}] did not exist. Created it.'.format(**locals()), 'green')\n\n if not self.config.has_section('credentials'):\n self.config.add_section('credentials')\n self.config.set('credentials', 'world', 'de100')\n self.config.set('credentials', 'username', 'yournamegoeshere')\n self.config.set('credentials', 'password', 'yourpasswordgoeshere')\n self.config.set('credentials', 'captcha_user', 'yourdeathbycaptchaussernamegoeshere')\n self.config.set('credentials', 'captcha_pass', 'yourdeathbycaptchapasswordgoeshere')\n colorprint('[credentials] in [{self.settingpath}] did not exist. Created it.'.format(**locals()), 'green')\n\n if not self.config.has_section('control'):\n self.config.add_section('control')\n self.config.set('control', 'sleep', '300')\n self.config.set('control', 'farmsplit', '5')\n colorprint('[control] in [{self.settingpath}] did not exist. Created it.'.format(**locals()), 'green')\n\n if not self.config.has_section('storage'):\n self.config.add_section('storage')\n self.config.set('storage', 'path', 'data')\n colorprint('[storage] in [{self.settingpath}] did not exist. Created it.'.format(**locals()), 'green')\n\n self.config.write(open(self.settingpath, 'w'))", "def configuration():", "def render_settings_view():\n return render_template('settings_screen.html', realsense_device_status=realsense_enabled, detector_enabled=enabled_detector)", "def printConfig():\n # Why not log instead? 
Are we asking user to confirm settings?\n pass # until implemented", "def settings():\n raise NotImplementedError # pragma: nocoverage", "def openSettings(self):\r\n pass", "def settings():\n return _get_settings()[1]", "def onSettings(self):\n pass", "def settings(self) -> Dict[str, Any]:\n return {}", "def config (self):\n import wikicode\n class Config (wikicode.extension):\n def run (self):\n self.send_page (\"Generic DC Setup\")\n wikicode.run_extension (Config)", "def showSettings(self):\n self.c.show()", "def config():", "def config():", "def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings", "def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings", "def print_settings(config):\n print(\"----------------------------------------\")\n print(\"SETTINGS\")\n print(\"----------------------------------------\")\n for key, value in config:\n print(\"%s=%s\" % (key, value))\n print(\"----------------------------------------\")", "def test_act_on_settings(self):\n pass # TODO(tlarsen)", "def test_act_on_settings(self):\n pass # TODO(tlarsen)", "def global_settings(request):\n return {\n 'OFFLINE_MODE_CAPABILITY_ENABLED': settings.OFFLINE_MODE_CAPABILITY_ENABLED\n }", "def _define_settings(self):\n\n self.settings = {}\n\n ##### ORIGINALLY IN THE DOMAIN FILE #######\n\n # Maximum input in the C-Space : no constituent can be more than 100% present\n self.settings['maxInp'] = 1\n\n #### ORIGINALLY IN THE SETTINGS FILE #####\n self.settings[\"epochs\"] = 3 # Training epochs\n self.settings[\"tgtStd\"] = 12e-6\n self.settings['TInit'] = 1e-6\n self.settings[\"TMin\"] = 0\n self.settings[\"TDecayRate\"] = 0.05\n self.settings[\"lambdaInit\"] = 0.011387\n self.settings['lambdaMin'] = 0.0001\n self.settings[\"lambdaDecayRate\"] = 0.60\n self.settings[\"maxSteps\"] = 300000\n self.settings[\"emaSpeedTol\"] = 0.009\n self.settings[\"emaFactor\"] = .005\n self.settings[\"printInterval\"] = 3000\n self.settings[\"summary_file\"] = \"data/summary.txt\"\n mean = torch.ones(self.grammar.bind.nF,\n self.grammar.bind.nR)/self.grammar.bind.nF\n self.settings[\"initStateMean\"] = mean\n self.settings[\"initStateStdev\"] = .025\n self.settings['clamp'] = False\n\n if self.custom_settings is not None:\n for key, value in self.custom_settings.items():\n if key in self.settings:\n self.settings[key] = value", "def page_settings(state):\n\n st.title(\":wrench: Settings\")\n st.markdown(\"## **Your chosen settings:**\")\n display_state_values(state)\n\n st.write(\"---\")\n st.markdown(\"#### Enter Stock Ticker Symbols:\")\n state.stocks = st.text_input(\n \"Enter Stock Symbols Separated by Commas (EX: AAPL, MSFT):\",\n state.stocks or \"\",\n )\n\n state.stocks = state.stocks\n state.stocks_list = state.stocks.split(\", \")\n\n st.markdown(\"#### Choose dataset size to train models with:\")\n options = [\"5d\", \"1mo\", \"3mo\", \"6mo\", \"1y\", \"5y\", \"10y\", \"max\"]\n\n state.period = st.radio(\n \"Choose amount of historical training data. 1 year is recommended, find more recommendations on homepage.\",\n options,\n options.index(state.radio) if state.radio else 0,\n )\n\n if st.button(\"Run the Tool\", state.run_button):\n state.run_button_checked = True\n st.markdown(\n \"### *PLEASE WAIT! 
Scraping data, training models, and generating prediction results NOW!*\"\n )\n state.scraped_data = scraper.perform_scraping(state.stocks_list, state.period)\n state.finalized_data = prediction.run_predictor(\n state.scraped_data, state.period\n )\n\n if state.run_button_checked == True:\n st.markdown(\"## *Go to the dashboard to view your newly scraped data data.*\")\n\n if run_location == \"local\":\n st.markdown(\"### Export Options\")\n if st.checkbox(\"Would you like to export results?\", state.export_checkbox):\n state.export_checkbox = True\n st.markdown(\n \"#### Enter New or Existing Export File Name (filename.json):\"\n )\n state.file_name = st.text_input(\n \"Enter the export filename.\", state.input or \"\"\n )\n if state.file_name:\n for data in state.finalized_data:\n json_handler.append_json(\n data[\"prediction_results\"], state.file_name\n )\n st.markdown(\"Your data has been exported!\")\n else:\n st.markdown(\"Enter a file name to export data!\")", "def test_010_view_settings(self):\n\n testflow.step(\"Showing setting via CLI\")\n assert self.settings_cli.run('show')[0], \"Failed to view settings\"", "def __gitConfigure(self):\n e5App().getObject(\"UserInterface\").showPreferences(\"zzz_gitPage\")", "def _currentPageSettings(self):\n view = self._window.currentBrowser()\n if view is None:\n return None\n \n return view.page().settings()", "def generate_description_general_settings(self):\n\n if not self.config.has_section('description'):\n self.config.add_section('description')\n\n self.config.set('description', '# Ein generelles Settingfile.')\n self.config.set('description', '# Hier werden statische Daten gespeichert, die von user zu user')\n self.config.set('description', '# und von Welt zu Welt verschieden sind. Diese Daten hier können')\n self.config.set('description', '# nicht automatisch generiert werden.')\n\n with open(self.settingpath, 'w') as cfile:\n self.config.write(cfile)", "def settings_information():\n return {\n \"version\": VERSION,\n \"modules_directory\": MODULES_DIR,\n \"web_directory\": WEB_DIR,\n \"dependencies_directory\": DEPENDENCIES_DIR,\n \"bot_directory\": BOT_DIR,\n \"bot_data_directory\": BOT_DATA_DIR,\n \"bot_image_directory\": BOT_IMAGE_DIR,\n \"local_data_directory\": LOCAL_DATA_DIR,\n \"local_data_database_directory\": LOCAL_DATA_DB_DIR,\n \"local_data_log_directory\": LOCAL_DATA_LOG_DIR,\n \"local_data_backup_directory\": LOCAL_DATA_BACKUP_DIR,\n \"database_name\": DB_NAME,\n \"database_file\": DB_FILE,\n \"authentication_base_url\": AUTH_BASE_URL,\n \"authentication_auth_url\": AUTH_AUTH_URL,\n \"tesseract_dependency_directory\": TESSERACT_DEPENDENCY_DIR,\n \"tesseract_directory\": TESSERACT_DIR,\n \"tesseract_path\": TESSERACT_PATH,\n }", "def add_settings_early(self):\n\n # config settings\n config = {\n # some generic settings for every site, to point to location of some stuff\n mconst.DEF_SETTINGNAME_pkgdirimps_sitempacks: [pkgdirimp_sitempacks],\n mconst.DEF_SETTINGNAME_controllerroot: pkgdirimp_controllers,\n mconst.DEF_SETTINGNAME_sitefilepath: misc.calc_modulefiledirpath(__file__),\n # should we also load mewlo site installed setuptools plugins\n mconst.DEF_SETTINGNAME_flag_importsetuptoolspacks: True,\n mconst.DEF_SETTINGNAME_replaceshadowpath: '${sitefilepath}/replaceshadow',\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)\n\n # config settings\n config = {\n # Name of site\n mconst.DEF_SETTINGNAME_sitename: 'Mewlo',\n # Specify where this site serves from\n # these siteurls should not end in / 
so if you are serving a site at root just use relative of '' and absolute of 'http://sitename.com'\n mconst.DEF_SETTINGNAME_siteurl_relative: '',\n mconst.DEF_SETTINGNAME_siteurl_absolute: 'http://127.0.0.1:8080',\n #mconst.DEF_SETTINGNAME_siteurl_relative: '/public/publicity',\n #mconst.DEF_SETTINGNAME_siteurl_absolute: 'http://127.0.0.1:8080/public/publicity',\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)\n\n # config settings\n config = {\n # online status information\n mconst.DEF_SETTINGNAME_isenabled: True,\n mconst.DEF_SETTINGNAME_isonline: True,\n mconst.DEF_SETTINGNAME_offline_mode: 'maintenance',\n mconst.DEF_SETTINGNAME_offline_message: 'We are down for leap-year maintenance; we will be back soon.',\n mconst.DEF_SETTINGNAME_offline_allowadmin: False,\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)\n\n\n\n # extension pack config -- we need to explicitly enable plugins\n packconfig = {\n 'mouser.mewlotestplug' : {\n 'isenabled': False,\n },\n 'mouser.testpack' : {\n 'isenabled': False,\n },\n 'mewlo.siteaddon.account' : {\n 'isenabled': True,\n },\n 'mewlo.siteaddon.group' : {\n 'isenabled': True,\n },\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_packs, packconfig)\n\n\n # database config\n databaseconfig = {\n 'settings' : {\n 'sqlalchemy_loglevel' : logging.NOTSET,\n #'sqlalchemy_loglevel' : logging.INFO,\n },\n 'default' : {\n 'url' : 'sqlite:///${dbfilepath}/mewlo_testsite1.sqlite',\n #'tablename_prefix': 'mewlo_',\n 'flag_echologging' : False,\n },\n 'mysql_unused' : {\n # Sample configuration for mysql\n 'url' : 'mysql://mewlo_user:mewlo_pass@localhost:3306/mewlo_testsite1',\n 'tablename_prefix': 'mewlo_'\n },\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_database, databaseconfig)\n self.settings.listappend_settings_key(mconst.DEF_SETTINGSEC_make_dirs, '${dbfilepath}')\n\n # email config settings\n mailconfig = {\n # online status information\n 'smtp_host': self.get_configval('mail_smtp_host'),\n 'smtp_login': self.get_configval('mail_smtp_login'),\n 'smtp_port': self.get_configval('mail_smtp_port'),\n 'smtp_mode': self.get_configval('mail_smtp_mode'),\n 'smtp_password': self.get_configval('mail_smtp_password'),\n 'mail_from' : self.get_configval('mail_from'),\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_mail, mailconfig)\n\n\n # account siteaddon settings\n siteaddonconfig = {\n # online status information\n 'registration_mode': 'immediate',\n 'flag_require_email_verified_before_login': False,\n }\n self.settings.merge_settings_key('siteaddon_account', siteaddonconfig)\n\n\n\n # ATTN: UNFINISHED\n # asset mounts config\n if (False):\n assetmountconfig = {\n 'default' : {\n # an internal assetmount just needs a url route\n 'type': 'internal',\n 'routeid': 'static_files',\n },\n 'external' : {\n 'type': 'external',\n 'filepath': '${mewlofilepath}/public_assets',\n 'urlpath': 'http://127.0.0.1/mewlo/public_assets',\n },\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_asset_mounts, assetmountconfig)\n\n\n\n\n\n #print \"TESTING CONFIG1:\"\n #self.run_configfunc('sayhello',1,2,3)\n #print \"TESTING CONFIG2:\"\n #self.run_allconfigfuncs('sayhello',1,2,3)", "def config():\n config_django()\n config_svisor()", "def general_settings(request):\n serversettings = TeamspeakServer.objects.get(id=1)\n saved = False\n if request.method == \"POST\":\n serversettings.host = request.POST['ts3hostname']\n serversettings.voiceport = int(request.POST['Port'])\n 
serversettings.queryuser = request.POST['QueryLoginUsername']\n if request.POST['QueryLoginPasswort'] != '':\n serversettings.querypass = request.POST['QueryLoginPasswort']\n serversettings.queryport = int(request.POST['QueryPort'])\n serversettings.save()\n saved = True\n\n return TemplateResponse(\n request, 'teamspeak_settings.html',\n {'ts3hostname': serversettings.host,\n 'Port': serversettings.voiceport,\n 'QueryLoginUsername': serversettings.queryuser,\n 'QueryLoginPasswort': serversettings.querypass,\n 'QueryPort': serversettings.queryport,\n 'saved': saved}\n )", "def set_settings_devices(self):\n self.set_thermostat, self.set_humidifier, self.set_sprinklers, self.set_ventilation = self.settings[3:]", "def magic_config(self,parameter_s=''):\n \n page('Current configuration structure:\\n'+\n pformat(self.rc.dict()))", "def config(settings):\n\n #T = current.T\n\n # PrePopulate data\n settings.base.prepopulate += (\"SHARE/LK\",)\n settings.base.prepopulate_demo += (\"SHARE/Demo\",)\n\n # Finance settings\n settings.fin.currencies = {\n #\"EUR\" : \"Euros\",\n #\"GBP\" : \"Great British Pounds\",\n \"LKR\" : \"Sri Lanka Rupees\",\n \"USD\" : \"United States Dollars\",\n }\n settings.fin.currency_default = \"USD\"", "def config(self):\n pass", "def config(self):\n pass", "def other_settings():\n return OTHER_SETTINGS", "def config(settings):\n\n #T = current.T\n\n # Pre-Populate\n settings.base.prepopulate.append(\"locations/TH\")\n\n # Uncomment to restrict to specific country/countries\n settings.gis.countries.append(\"TH\")\n\n # -------------------------------------------------------------------------\n # L10n (Localization) settings\n settings.L10n.languages[\"th\"] = \"Thai\"\n # Default Language (put this in custom template if-required)\n #settings.L10n.default_language = \"th\"\n # Default timezone for users\n settings.L10n.timezone = \"Asia/Bangkok\"\n # Default Country Code for telephone numbers\n settings.L10n.default_country_code = 66\n\n settings.fin.currencies[\"THB\"] = \"Baht\"\n settings.fin.currency_default = \"THB\"", "def settings_page():\n log.info(\":WEB:/settings\")\n if \"username\" in session.keys():\n if \"logged-in\" in session.keys():\n if session[\"logged-in\"]:\n session[\"user\"] = db[\"users\"].find_one(username=session[\"username\"])\n if session[\"user\"]:\n return render_template(\"settings.html\")\n return redirect(\"/\")", "def getDefaultSettings():\n return {}", "def globalsettings(golbalsettingbutton):\n try:\n atomacclick(golbalsettingbutton)\n global_settings_content = getApplicatontitle(golbalsettingbutton)\n except Exception as er:\n print \"Not able to get globalsettings_content\"\n return False\n return global_settings_content", "def settings():\r\n\r\n config = cp.ConfigParser()\r\n config.read('settings.ini')\r\n \r\n files = config['files']\r\n model = config['model']\r\n plot = config['plot']\r\n \r\n file_format = files['format']\r\n species_file = r'data/' + files['species file']\r\n reactions_file = r'data/' + files['reactions file']\r\n output_file = 'output/' + files['output file']\r\n model_type = model['model type']\r\n density = model.getfloat('density')\r\n temperature = model.getfloat('temperature')\r\n start_time = model.getfloat('start time')\r\n end_time = model.getfloat('end time')\r\n outfile = plot['outfile for plotting']\r\n\r\n return file_format, species_file, reactions_file, output_file, model_type, density, temperature, start_time, end_time, outfile", "def print_settings(self, title=None):\n if title:\n 
print(title)\n print('Contents of imagenode.yaml:')\n pprint.pprint(self.config)\n print()", "def setDefaultSettings():\n if PLATFORM == 'Windows':\n font = 'Consolas'\n else:\n font = 'Monospace'\n\n preferenceNode = nuke.toNode('preferences')\n # viewer settings\n preferenceNode['maxPanels'].setValue(5)\n preferenceNode['TextureSize'].setValue('2048x2048')\n preferenceNode['viewer_bg_color_3D'].setValue(1280068863)\n preferenceNode['viewer_fg_color_3D'].setValue(4294967295L)\n preferenceNode['Viewer3DControlEmulation'].setValue('Maya')\n preferenceNode['middleButtonPans'].setValue(False)\n preferenceNode['dot_node_scale'].setValue(1.5)\n\n # script editor settings\n preferenceNode['clearOnSuccess'].setValue(False)\n preferenceNode['echoAllCommands'].setValue(True)\n preferenceNode['ScriptEditorFont'].setValue(font)\n preferenceNode['ScriptEditorFontSize'].setValue(12.0)\n preferenceNode['kwdsFgColour'].setValue(2629566719L)\n preferenceNode['stringLiteralsFgColourDQ'].setValue(10354943)\n preferenceNode['stringLiteralsFgColourSQ'].setValue(10354943)\n preferenceNode['commentsFgColour'].setValue(2442236415L)", "def get_default_config_help(self):\n config = super(SignalfxHandler, self).get_default_config_help()\n\n config.update({\n 'url': 'Where to send metrics',\n 'batch': 'How many to store before sending',\n 'auth_token': 'Org API token to use when sending metrics',\n })\n\n return config", "def get_settings(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/setting\"\n })", "def SaveSettings(request):\n return render(request, \"dbkeeper/save_settings.html\")", "def __configure(self):\n e5App().getObject(\"UserInterface\").showPreferences(\"logViewerPage\")", "def client_settings():\n return CLIENT_SETTINGS", "def getConfigData():\n return {\n \"djangoPage\": [\n QCoreApplication.translate(\"ProjectDjangoPlugin\", \"Django\"),\n os.path.join(\"ProjectDjango\", \"icons\", \"django.png\"),\n createDjangoPage, None, None],\n }", "def __init__( settings={} ):", "def edit_settings(self):\n return 1 << 3", "def settings_init(self):\n config_console = configparser.ConfigParser()\n config_console.read(CONFIG_FILE_NAME)\n self.logmode = config_console[\"LOG\"][\"log_mode\"]", "async def fishingsettings(self, ctx:commands.Context):", "def base_settings():\n return \"\"\"\n iota = True\n rho = False\n omega = True\n chi = True\n pini = False\n\n emr = 0\n constrain_omega = 1\n iota.at_specific = 0\n iota.min = 0.0001\n iota.age_cnt = 2\n iota.time_cnt = 2\n omega.at_specific = 1\n omega.min = 0.0001\n omega.age_cnt = 0\n omega.time_cnt = 0\n chi.at_specific = 0\n chi.min = 0.0001\n chi.age_cnt = 1\n chi.time_cnt = 2\n drill_start = 0\n drill_end = -1\n re.iota = all\n re.omega = all\n re.chi = all\n study.0 = False\n study.11 = True\n study.11.at_specific = 0\n study.11.age_cnt = 1\n study.11.time_cnt = 1\n study.11.covtype = rate_value\n study.11.rate = chi\n study.1604 = True\n study.1604.at_specific = 0\n study.1604.age_cnt = 1\n study.1604.time_cnt = 1\n study.1604.covtype = meas_std\n country.156 = True\n country.156.at_specific = 0\n country.156.age_cnt = 1\n country.156.time_cnt = 1\n country.156.covtype = rate_value\n country.156.rate = iota\n country.1998 = True\n country.1998.at_specific = 0\n country.1998.age_cnt = 1\n country.1998.time_cnt = 1\n country.1998.covtype = meas_std\n job_idx = 0\n \"\"\"", "def test_settings(self):\n\n wf._items = []\n\n sys.argv = ['drive.py', '>']\n main(None)\n self.assertEqual(len(wf._items), 4)\n self.assertEqual(wf._items[0].title, 
SETTINGS['LOGIN']['title'])\n self.assertEqual(wf._items[1].title, SETTINGS['LOGOUT']['title'])\n self.assertEqual(wf._items[2].title, SETTINGS['CLEAR_CACHE']['title'])\n self.assertEqual(wf._items[3].title, SETTINGS['SET_CACHE']['title'] % '[seconds]')\n wf._items = []", "def check_settings(self):\r\n pass", "def test_change_config(self):\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME, SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Enabled').selected = True\n browser.getControl('Save').click()\n\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertTrue(settings.optimize)\n self.assertTrue(settings.enabled)", "def site_settings(self):\r\n return users.SiteSettings(self)", "def app_settings():\n return {\n 'app_wksp_path': os.path.join(App.get_app_workspace().path, ''),\n 'threddsdatadir': App.get_custom_setting(\"thredds_path\"),\n 'threddsurl': App.get_custom_setting(\"thredds_url\"),\n 'logfile': os.path.join(App.get_app_workspace().path, 'workflow.log')\n }", "def save(self):\n return self.client._perform_empty(\"PUT\", \"/admin/general-settings\", body = self.settings)", "def config(self):\n return {}", "def generate_settings():\r\n conf_file = os.path.join(os.path.dirname(base_settings.__file__),\r\n 'example', 'conf.py')\r\n conf_template = open(conf_file).read()\r\n default_url = 'http://salmon.example.com'\r\n site_url = raw_input(\"What will be the URL for Salmon? [{0}]\".format(\r\n default_url))\r\n site_url = site_url or default_url\r\n secret_key = base64.b64encode(os.urandom(KEY_LENGTH))\r\n api_key = base64.b64encode(os.urandom(KEY_LENGTH))\r\n output = conf_template.format(api_key=api_key, secret_key=secret_key,\r\n site_url=site_url)\r\n return output", "def updateSettingsUI(self):\n\n pass", "async def settings(self, ctx):\n settings = config.load_settings()\n guild = ctx.guild.id\n embed = discord.Embed(\n title=ctx.guild.name + \" bot settings!\",\n description=\"My settings for this server!\",\n color=discord.Colour.purple()\n )\n embed.add_field(name=\"Prefix\", value=settings['guilds'][str(guild)]['prefix'])\n embed.add_field(name=\"Max Volume\", value=str(settings['guilds'][str(guild)]['max_volume']))\n embed.add_field(name=\"Leveling system\", value=settings['guilds'][str(guild)]['leveling'])\n embed.add_field(name=\"Welcome Message\", value=settings['guilds'][str(guild)]['welcome'])\n embed.add_field(name=\"Goodbye Message\", value=settings['guilds'][str(guild)]['goodbye'])\n embed.add_field(name=\"Warns until kick\", value=str(settings['guilds'][str(guild)]['warn_kick']))\n embed.add_field(name=\"Warns until ban\", value=str(settings['guilds'][str(guild)]['warn_ban']))\n await ctx.send(\"\", embed=embed)", "def cont_settings_(request):\n \n return {\"settings\": settings}", "def default_settings(self, settings):\n return {}", "def init_game_setting(self):\r\n pass", "async def showsettings(self, ctx: commands.Context):\n data = await self.config.all()\n channel = self.bot.get_channel(data[\"logChannel\"])\n channel = channel.mention if channel else \"None\"\n description = (\n f\"Name: {data['plagueName']}\\n\"\n f\"Log Channel: {channel}\\n\"\n f\"Infection Rate: {data['rate']}%\"\n )\n e = discord.Embed(\n color=await ctx.embed_color(),\n 
description=description,\n )\n e.set_author(name=f\"Plague Game Settings\", icon_url=self.bot.user.avatar_url)\n await ctx.send(embed=e)", "def quick_set_html_conversion_settings(self):\n self.logger.debug(\"HTML conversion settings\")\n self.export_format = 'html'\n self.quick_setting = 'html'\n self.front_matter_format = 'yaml'\n self.metadata_schema = []\n if self.conversion_input == 'nsx':\n self.metadata_schema = ['title', 'ctime', 'mtime', 'tag']\n self.spaces_in_tags = False\n self.split_tags = False\n self.first_row_as_header = True\n self.first_column_as_header = True\n self.chart_image = True\n self.chart_csv = True\n self.chart_data_table = True", "def get_common_settings(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Settings/\"))", "def settings_load(self):\n self.ui.spinBox_ATSP.setValue(self.default['ATSP'])\n\n if self.default['serialLabel'] == 'bt':\n self.ui.btRadio.setChecked(True)\n try:\n os.system(\"blueman-manager\")\n except:\n print \"Please install 'blueman' package\"\n elif self.default['serialLabel'] == 'usb':\n self.ui.usbRadio.setChecked(True)\n else:\n self.ui.devRadio.setChecked(True)\n\n if self.default['units'] == 'metric':\n self.ui.units_metric_radio.setChecked(True)\n else:\n self.ui.units_US_radio.setChecked(True)\n\n return", "def quick_set_manual_settings(self):\n self.logger.debug(\"Manual conversion settings\")\n self.quick_setting = 'manual'\n self.front_matter_format = 'none'\n self.metadata_schema = []\n if self.conversion_input == 'nsx':\n self.metadata_schema = ['title', 'ctime', 'mtime', 'tag']\n self.spaces_in_tags = False\n self.split_tags = False\n self.first_row_as_header = False\n self.first_column_as_header = False\n self.chart_image = True\n self.chart_csv = True\n self.chart_data_table = True", "def settings(self):\r\n url = '{0}/userSettings'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def __writeConfig(self):\n page = None\n\n #TODO: get values of configurations here\n particles = \"#f\" if not base.particleMgrEnabled else \"#t\"\n volume = str(round(base.musicManager.getVolume(), 2))\n mute = \"#f\" if base.AppHasAudioFocus else \"#t\"\n #TODO: add any configuration variable name that you have added\n customConfigVariables = [\n \"\", \"particles-enabled\", \"audio-mute\", \"audio-volume\"]\n if os.path.exists(prcFile):\n # open the config file and change values according to current\n # application settings\n page = loadPrcFile(Filename.fromOsSpecific(prcFile))\n removeDecls = []\n for dec in range(page.getNumDeclarations()):\n # Check if our variables are given.\n # NOTE: This check has to be done to not loose our base or other\n # manual config changes by the user\n if page.getVariableName(dec) in customConfigVariables:\n decl = page.modifyDeclaration(dec)\n removeDecls.append(decl)\n for dec in removeDecls:\n page.deleteDeclaration(dec)\n # NOTE: particles-enabled and audio-mute are custom variables and\n # have to be loaded by hand at startup\n # Particles\n page.makeDeclaration(\"particles-enabled\", particles)\n # audio\n page.makeDeclaration(\"audio-volume\", volume)\n page.makeDeclaration(\"audio-mute\", mute)\n else:\n # Create a config file and set default values\n cpMgr = ConfigPageManager.getGlobalPtr()\n page = cpMgr.makeExplicitPage(\"%s Pandaconfig\"%appName)\n # set OpenGL to be the default\n page.makeDeclaration(\"load-display\", \"pandagl\")\n # 
get the displays width and height\n w = self.pipe.getDisplayWidth()\n h = self.pipe.getDisplayHeight()\n # set the window size in the config file\n page.makeDeclaration(\"win-size\", \"%d %d\"%(w, h))\n # set the default to fullscreen in the config file\n page.makeDeclaration(\"fullscreen\", \"1\")\n # particles\n page.makeDeclaration(\"particles-enabled\", \"#t\")\n # audio\n page.makeDeclaration(\"audio-volume\", volume)\n page.makeDeclaration(\"audio-mute\", \"#f\")\n # create a stream to the specified config file\n configfile = OFileStream(prcFile)\n # and now write it out\n page.write(configfile)\n # close the stream\n configfile.close()", "def readSettingsFile():\n\tglobal logfile\n\tglobal backupCount\n\tglobal maxBytes\n\tglobal debug\n\t\n\tif SettingsFile.getOptionString(INI_Section,\"logfile\"):\n\t\tlogfile = SettingsFile.getOptionString(INI_Section,\"logfile\")\n\tif SettingsFile.getOptionInt(INI_Section,\"maxBytes\"):\n\t\tmaxBytes = SettingsFile.getOptionInt(INI_Section,\"maxBytes\")\n\tif SettingsFile.getOptionInt(INI_Section,\"backupCount\"):\n\t\tbackupCount = SettingsFile.getOptionInt(INI_Section,\"backupCount\")\n\tif SettingsFile.getOptionBoolean(INI_Section,\"debug\"):\n\t\tdebug = SettingsFile.getOptionBoolean(INI_Section,\"debug\")\n\t#endif", "async def _show_settings(self, ctx: Context):\n\n guild: discord.Guild = ctx.guild\n\n host = await self.config.guild(guild).host_id()\n if host:\n host = discord.utils.get(guild.roles, id=host).mention\n else:\n host = f\"`{host}`\"\n\n player = await self.config.guild(guild).player_id()\n if player:\n player = discord.utils.get(guild.roles, id=player).mention\n else:\n player = f\"`{player}`\"\n\n spec = await self.config.guild(guild).spec_id()\n if spec:\n spec = discord.utils.get(guild.roles, id=spec).mention\n else:\n spec = f\"`{spec}`\"\n\n dead = await self.config.guild(guild).dead_id()\n if dead:\n dead = discord.utils.get(guild.roles, id=dead).mention\n else:\n dead = f\"`{dead}`\"\n\n repl = await self.config.guild(guild).repl_id()\n if repl:\n repl = discord.utils.get(guild.roles, id=repl).mention\n else:\n repl = f\"`{repl}`\"\n\n signup = await self.config.guild(guild).signup_channel()\n if signup:\n signup = discord.utils.get(guild.text_channels, id=signup).mention\n else:\n signup = f\"`{signup}`\"\n\n na_ch = await self.config.guild(guild).na_channel_id()\n if na_ch:\n na_ch = discord.utils.get(guild.text_channels, id=na_ch).mention\n else:\n na_ch = f\"`{na_ch}`\"\n\n can_change_na = await self.config.guild(guild).can_change_na()\n\n lock = await self.config.guild(guild).tvmset_lock()\n\n sign_ups = await self.config.guild(guild).signups_on()\n\n total = await self.config.guild(guild).total_players()\n\n signed = await self.config.guild(guild).signed()\n\n txt = _(\n \"Host Role: {}\"\n \"\\nPlayer Role: {}\"\n \"\\nSpectator Role: {}\"\n \"\\nDead Player Role: {}\"\n \"\\nReplacement Role: {}\"\n \"\\nSign-ups Channel: {}\"\n \"\\nNight Action Channel: {}\"\n \"\\nCan Change NA: `{}`\"\n \"\\nTvM Settings Lock: `{}`\"\n \"\\nSign-ups Open: `{}`\"\n \"\\nTotal Players: `{}`\"\n \"\\nSign-ups: `{}`\"\n ).format(\n host, player, spec, dead, repl, signup, na_ch,\n can_change_na, lock, sign_ups, total, signed\n )\n\n embed = discord.Embed(\n color=0xAF70FF, title=\"TvM Settings\", description=txt\n )\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n await ctx.send(\n _(\n \"I don't have permission to embed messages.\"\n \" Please give me the permission!\"\n )\n )", "def settings(app: 
CommandGroup):\n\n @argument(\n \"--out\",\n dest=\"out\",\n default=sys.stdout,\n type=FileType(mode=\"w\"),\n help_text=\"File to output settings report to; default is stdout.\",\n )\n def _handler(opts) -> Optional[int]:\n \"\"\"\n Report of current settings.\n \"\"\"\n from pyapp.conf.report import SettingsReport\n\n return SettingsReport(False, opts.no_color, opts.out).run()\n\n app.command(_handler, name=\"settings\")", "def _post_initialisations(self):\n # Init the settings module.\n self.dummy_for_settings = SectionConfig(self.app.id, self.__class__.__name__)\n global settings\n settings = self.dummy_for_settings\n\n self.dummy_for_options = OptionConfig(self.app.id)\n global options\n options = self.dummy_for_options\n\n # Bind message boxes.\n self.MessageBox = MessageBox(self)\n self.msg = self.MessageBox.Message\n self.are_you_sure = self.MessageBox.are_you_sure\n\n # Set previous size and state.\n width = settings.get('width', 350)\n height = settings.get('height', 350)\n self.set_title(self.app.localizedname)\n self.resize(width, height)\n if settings.get_bool('maximized', False):\n self.maximize()\n # Load any other settings here.\n self.load_xinput_devices()", "def get_settings(self):\n return [('test_environment', self.test_environment),\n ('base_data_dir', self.base_data_dir),\n ('locale', self.locale)]", "def check_settings(self):\n pass", "def getConfigPanel():\n\treturn None", "def various(self):\n # Changer le default d'un tb, ici ne rien mettre au niveau le plus haut\n context = self\n from imio.dashboard.utils import getCollectionLinkCriterion\n criterion = getCollectionLinkCriterion(context)\n criterion.default = u''\n from eea.facetednavigation.criteria.interfaces import ICriteria\n ICriteria(context).criteria._p_changed = True\n\n # Réparer la vue de la page pst\n context.setLayout('view')\n from imio.project.pst.setuphandlers import configure_faceted_folder\n configure_faceted_folder(context, xml='default_dashboard_widgets.xml', default_UID=None)", "def __init__(self):\n self.html = conf.get('HTML', 'simplePage').format(title='Services',\n h1='Services', message='')", "def admin_only():\n return 'Super-seekrit admin page.'", "def habHelp(self):\n rf = os.path.join('docs','helpButtons','prefsHabitat.html')\n self.showHelpFile( rf )", "def create_settings():\n\n settings = {}\n\n settings['induction'] = {'type': 'DT'}\n\n settings['selection'] = {'type': 'Base',\n 'its': 1,\n 'param': 1}\n\n settings['prediction'] = {'type': 'MI',\n 'its': 0.1,\n 'param': 0.95}\n\n settings['queries'] = {}\n\n settings['metadata'] = {}\n\n settings['model_data'] = {}\n\n return settings", "def settings():\n # TODO: How should this be handled? Should a speaker's bio be stored\n # as a snapshot from event to event? 
It could be stored as part of a\n # talks.models.Presentation.\n from pygotham.forms import ProfileForm\n\n form = ProfileForm(request.form, obj=current_user)\n if form.validate_on_submit():\n form.populate_obj(current_user)\n db.session.commit()\n\n flash('Your profile has been updated.', 'success')\n\n return redirect(url_for('profile.settings'))\n\n return render_template('profile/settings.html', form=form)", "def options_set(self):\n\n global OPTIONS\n OPTIONS.append(config.ENABLE(self.threaded))\n OPTIONS.append(config.ENABLE(self.datasaver))\n OPTIONS.append(self.language)", "def home(request):\n page=1\n span = 15\n order = \"-created_at\"\n page = request.GET.get('page', page)\n span = request.GET.get('span', span)\n config_list,entry_count = Config.get_items(span=span, page=page)\n\n temp_values = {\n \"target\":\"config\",\n \"title\":u\"Config定義一覧ページ\",\n \"config_list\":config_list,\n \"subscroll\":True,\n }\n return render(request, 'server/index.html', temp_values)" ]
[ "0.7047792", "0.68825513", "0.68671405", "0.67481273", "0.67089194", "0.670445", "0.6671479", "0.66425955", "0.6616734", "0.66150963", "0.65424526", "0.6488922", "0.64811325", "0.6472592", "0.6429102", "0.63903475", "0.6352831", "0.6333741", "0.63297915", "0.62845564", "0.6250883", "0.6228672", "0.62263554", "0.6221027", "0.6221027", "0.6220739", "0.6220739", "0.6210946", "0.618954", "0.618954", "0.6180176", "0.6164821", "0.61552596", "0.61509055", "0.61410457", "0.6133662", "0.6125521", "0.6121676", "0.6119772", "0.6110901", "0.6103265", "0.6092445", "0.605754", "0.60551304", "0.6037847", "0.6037847", "0.60259426", "0.602453", "0.6022206", "0.60161126", "0.60160345", "0.60124505", "0.6006491", "0.5992858", "0.5989378", "0.5980231", "0.5963905", "0.5958357", "0.5943221", "0.5936582", "0.59116006", "0.5897262", "0.5876193", "0.5853005", "0.5848866", "0.5844841", "0.58231366", "0.5803428", "0.5799839", "0.5793605", "0.5788384", "0.5782458", "0.5775033", "0.5774835", "0.5768868", "0.57684815", "0.57572556", "0.5750142", "0.573449", "0.5725838", "0.5724677", "0.572364", "0.57199216", "0.5715897", "0.5714748", "0.5711025", "0.5707231", "0.570204", "0.569645", "0.569386", "0.56871855", "0.5684346", "0.5679828", "0.5679258", "0.56790006", "0.5671564", "0.56709254", "0.56678915", "0.56676596", "0.5665748" ]
0.59287745
60
Calls open file dialog, possible to choose only '.xlsx .xls .xlsm .xlsb'
def callDialog(self): self.pathTuple = filedialog.askopenfilenames(filetypes=[("Excel files", ".xlsx .xls .xlsm .xlsb")]) self.fileNames = [basename(path.abspath(name)) for name in self.pathTuple]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_file():\n filepath = filedialog.askopenfilename(initialdir = \"./\",title = \"Seleccionar archivo\",filetypes = ((\"xls files\",\"*.xls\"),(\"xlsx files\",\"*.xlsx\")))\n if not filepath:\n return\n\n window.title(filepath)\n lbl_url[\"text\"] = filepath\n btn_generate['state'] = 'normal'", "def open_file_dialog(self, title, initial_directory=None, file_types=None, multiselect=False):\n return self._impl.open_file_dialog(title, initial_directory, file_types, multiselect)", "def on_open_file(self):\n return tkFileDialog.askopenfilename(\n filetypes=[('default', '*.txt'), ('All files', '*.*')])", "def fileDialog(*args, application: bool=True, defaultFileName: AnyStr=\"\", directoryMask:\n AnyStr=\"\", mode: int=0, title: AnyStr=\"\", **kwargs)->AnyStr:\n pass", "def OpenFileExcel(self, *args, **kwargs):\n directory = None\n if kwargs is not None:\n for key, value in kwargs.items():\n if key == 'directory':\n directory = value\n\n\n\n with wx.FileDialog(self, \"Open report file\", directory,\n wildcard=\"excel files (*.xlsx)|*.xlsx|(*.xls)|*.xlsx|(*.csv)|*.csv\",\n style=wx.FD_OPEN) as fileDialog:\n \n if fileDialog.ShowModal() == wx.ID_CANCEL:\n return \n\n\n else:\n\n pathname = fileDialog.GetPath()\n print('the file to be opened is :'+ pathname)\n\n def openWorkbook(xlapp, xlfile):\n try:\n xlwb = xlapp.Workbooks(xlfile)\n except Exception as e:\n try:\n xlwb = xlapp.Workbooks.Open(xlfile)\n except Exception as e:\n print(e)\n xlwb = None\n return (xlwb)\n\n pathname = os.path.normcase(pathname)\n\n\n try:\n excel = win32.gencache.EnsureDispatch('Excel.Application')\n wb = openWorkbook(excel, pathname)\n #ws = wb.Worksheets('Sheet1')\n excel.Visible = True\n except Exception as e:\n print(e)\n\n finally:\n # RELEASES RESOURCES\n ws = None\n wb = None\n excel = None", "def ask_file(message=\"Select file for open.\", title=None):\n return dialog(\"ask_file\", message=message, title=title)", "def choose_file():\r\n import tkinter\r\n from tkinter import filedialog\r\n\r\n root_window = tkinter.Tk()\r\n root_window.withdraw()\r\n\r\n return filedialog.askopenfilename()", "def open_file(self: object) -> None:\n self.file = filedialog.askopenfilename(\n initialdir= os.getcwd(),title=\"Select File\",filetypes=(\n (\"Text Files\", \"*.txt\"),(\"all files\",\"*.*\")))\n\n if self.file:\n messagebox.showinfo(\"Selected file\", \"You have selected %s\"%(\n self.file))", "def file_to_open(self, title='Open file..', initial_folder=None, extension=\"All files (*.*)\", datafolder=None):\n pass", "def open_fileDialog(self):\n\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fileName, _ = QFileDialog.getOpenFileName(self, \"Открыть исходный файл\", os.path.expanduser(\"~\"),\n \"XML Файлы (*.xml);;JSON Файлы (*.json)\", options=options)\n if fileName:\n file_format = fileName.split('.')[1]\n if file_format == 'xml':\n self.data_from_xml(fileName)\n elif file_format == 'json':\n self.data_from_json(fileName)\n self.msg2Statusbar.emit('Импорт из файла {0}'.format(fileName))", "def menu_Open():\n asdf = tkFileDialog.askopenfilename()\n print(asdf)", "def request_file():\n \n from tkinter import Tk\n from tkinter.filedialog import askopenfilename\n \n # Make a top-level instance and hide from user.\n root = Tk()\n root.withdraw()\n\n # Make it almost invisible - no decorations, 0 size, top left corner.\n root.overrideredirect(True)\n root.geometry('0x0+0+0')\n\n # Show window again and lift it to top so it can get focus, otherwise dialogs will end up behind the 
terminal.\n root.deiconify()\n root.lift()\n root.focus_force()\n\n # Show an \"Open\" dialog box and return the path to the selected file\n file_path = askopenfilename(initialdir='./IR_Datasets/',\n title='Excel to Read',\n filetypes=(('New Excel', '*xlsx'), ('Old Excel', '*.xls')),\n parent=root)\n\n # Get rid of the top-level instance once to make it actually invisible.\n root.destroy()\n \n return file_path", "def input_file(self):\r\n try:\r\n f = tkFileDialog.askopenfilename(parent=self.top, initialdir=\"/home/marcin/pulpit/Py/\",\r\n title=\"Wybór pliku excel z danymi\",\r\n filetypes=[(\"Excel file\", \".xlsx\")])\r\n self.filepath_input.set(os.path.realpath(f))\r\n self.excel_input_file = os.path.realpath(f)\r\n except ValueError:\r\n tkMessageBox.showerror(\"Error\", \"Wystąpił problem z załadowaniem pliku excel z danymi.\")", "def FileOpenDialog( message, wildcard, style=0, defaultDir=os.getcwd(), defaultFile='' ):\n style = style | wx.OPEN | wx.CHANGE_DIR\n return FileDialog( message, wildcard, style, defaultDir, defaultFile )", "def msg_open(self,msg):\r\n filepaths = msg.get_data()\r\n if filepaths is ():\r\n #Create the file open dialog.\r\n filepaths,index = DoFileDialog(self.frame, wildcard = \"Python source (*.py,*.pyw)|*.py;*.pyw|All files (*,*.*)|*.*;*\")\r\n if filepaths==None:\r\n return\r\n\r\n if (filepaths is not None) and (filepaths!=[]):\r\n #open the file requested\r\n for path in filepaths:\r\n self.frame.notebook.OpenFile(path)\r\n self.frame.Show()\r\n self.frame.Raise()", "def on_open_button(self, event):\n wildcard = \"All files (*.*)|*.*|\"\\\n \"Preprocessed _iso_res.csv file (*_iso_res.csv)|*_iso_res.csv|\"\\\n \"Massacre iso_csv file (*_iso.csv)|*_iso.csv|\"\n dlg = wx.FileDialog(\n self, message=\"Choose a file\",\n defaultDir=self.currentDirectory, \n defaultFile=\"\",\n wildcard=wildcard,\n style=wx.OPEN | wx.CHANGE_DIR\n )\n \n if dlg.ShowModal() == wx.ID_OK:\n fullname = dlg.GetPaths()[0].split('/')\n dpa = '/'.join(fullname[:-1]) + '/'\n self.currentDirectory = dpa\n fna = fullname[-1]\n [dfr, pul, vlab] = openFile(dpa+fna)\n startApp(dfr, dpa, fna, pul, vlab, fsize=self.fsize, size=self.size)\n\n dlg.Destroy()", "def askOpen(parent,title='',defaultDir='',defaultFile='',wildcard='',style=wx.OPEN):\r\n defaultDir,defaultFile = [GPath(x).s for x in (defaultDir,defaultFile)]\r\n dialog = wx.FileDialog(parent,title,defaultDir,defaultFile,wildcard, style )\r\n if dialog.ShowModal() != wx.ID_OK: \r\n result = False\r\n elif style & wx.MULTIPLE:\r\n result = map(GPath,dialog.GetPaths())\r\n else:\r\n result = GPath(dialog.GetPath())\r\n dialog.Destroy()\r\n return result", "def open_file(self, event=None):\n file = fd.askopenfile(title=\"Choose file to open\",\n filetypes=[(\"Python(default)\", \"*.py\"), (\"Text\", \"*.txt\"),\n (\"Java\", \"*.java\"), (\"JavaScript\", \"*.js\"),\n (\"HTML\", \"*.html\"), (\"CSS\", \"*.css\"),\n (\"All files\", \"*.*\")])\n if file is None:\n return\n else:\n if imghdr.what(\n file.name): # if file is image return image type otherwise return None if file is not an image type\n from project_explorer import ProjectExplorer\n ProjectExplorer().open_image(file.name)\n else:\n self.add_tab(file=file.name, open_file=1)\n from syntax_highlight import Highlighting\n Highlighting().highlight2()", "def select_file() -> True:\n current_directory = os.getcwd()\n selected_file = eg.fileopenbox(title=EG_TITLE+': Open a file',\n default=os.path.join(current_directory, \"..\"),\n filetypes=\"*.txt,*.py\")\n print(f\"Selected file: 
{os.path.basename(selected_file)}\")\n print(f\"In directory: {os.path.dirname(selected_file)}\")\n return True", "def file_popup(file) -> str:\n layout = [\n [sg.Text(f\"Select the action to perform on\\n\\n{file}\")],\n [sg.Button(\"Open File\", key=\"-APP-\"),\n sg.Button(\"Open in File Explorer\", key=\"-EXPLORER-\"),\n sg.Button(\"Delete File\", key=\"-DEl-\",\n button_color=(\"Black\", \"OrangeRed\"))]\n ]\n window = sg.Window(\"Open selected file.\", layout, finalize=True)\n button, value = window.read()\n window.close()\n del window\n return button", "def askopenfilename():\n\n file_opt = options = {}\n options['defaultextension'] = '.*'\n options['initialdir'] = 'User\\\\'\n options['initialfile'] = ''\n options['parent'] = root\n options['title'] = 'choose file'\n options['multiple'] = 1\n\n # get filename\n filename = tk.filedialog.askopenfilename(**file_opt)\n\n if filename:\n self.sourcefile = filename\n if len(filename) is 1:\n file_path_var.set(filename)\n else:\n file_path_var.set(\n \"Multiple files, including {}\".format(filename[0]))", "def get_file_path():\n root = tk.Tk()\n root.withdraw()\n file_path = filedialog.askopenfilename(filetypes=[(\"Excel file\", \"*.xlsx\")])\n return file_path", "def browse(self):\n formats = [\n \"Text - comma separated (*.csv, *)\",\n \"Text - tab separated (*.tsv, *)\",\n \"Text - all files (*)\"\n ]\n\n dlg = QFileDialog(\n self, windowTitle=\"Open Data File\",\n acceptMode=QFileDialog.AcceptOpen,\n fileMode=QFileDialog.ExistingFile\n )\n dlg.setNameFilters(formats)\n state = self.dialog_state\n lastdir = state.get(\"directory\", \"\")\n lastfilter = state.get(\"filter\", \"\")\n\n if lastdir and os.path.isdir(lastdir):\n dlg.setDirectory(lastdir)\n if lastfilter:\n dlg.selectNameFilter(lastfilter)\n\n status = dlg.exec_()\n dlg.deleteLater()\n if status == QFileDialog.Accepted:\n self.dialog_state[\"directory\"] = dlg.directory().absolutePath()\n self.dialog_state[\"filter\"] = dlg.selectedNameFilter()\n\n selected_filter = dlg.selectedNameFilter()\n path = dlg.selectedFiles()[0]\n # pre-flight check; try to determine the nature of the file\n mtype = _mime_type_for_path(path)\n if not mtype.inherits(\"text/plain\"):\n mb = QMessageBox(\n parent=self,\n windowTitle=\"\",\n icon=QMessageBox.Question,\n text=\"The '{basename}' may be a binary file.\\n\"\n \"Are you sure you want to continue?\".format(\n basename=os.path.basename(path)),\n standardButtons=QMessageBox.Cancel | QMessageBox.Yes\n )\n mb.setWindowModality(Qt.WindowModal)\n if mb.exec() == QMessageBox.Cancel:\n return\n\n # initialize dialect based on selected extension\n if selected_filter in formats[:-1]:\n filter_idx = formats.index(selected_filter)\n if filter_idx == 0:\n dialect = csv.excel()\n elif filter_idx == 1:\n dialect = csv.excel_tab()\n else:\n dialect = csv.excel_tab()\n header = True\n else:\n try:\n dialect, header = sniff_csv_with_path(path)\n except Exception:\n dialect, header = csv.excel(), True\n\n options = None\n # Search for path in history.\n # If found use the stored params to initialize the import dialog\n items = self.itemsFromSettings()\n idx = index_where(items, lambda t: samepath(t[0], path))\n if idx is not None:\n _, options_ = items[idx]\n if options_ is not None:\n options = options_\n\n if options is None:\n if not header:\n rowspec = []\n else:\n rowspec = [(range(0, 1), RowSpec.Header)]\n options = Options(\n encoding=\"utf-8\", dialect=dialect, rowspec=rowspec)\n\n dlg = CSVImportDialog(\n self, windowTitle=\"Import Options\", 
sizeGripEnabled=True)\n dlg.setWindowModality(Qt.WindowModal)\n dlg.setPath(path)\n dlg.setOptions(options)\n status = dlg.exec_()\n dlg.deleteLater()\n if status == QDialog.Accepted:\n self.set_selected_file(path, dlg.options())", "def open_files():\n import Tkinter\n import tkFileDialog\n\n root = Tkinter.Tk()\n root.withdraw()\n root.overrideredirect(True)\n root.geometry('0x0+0+0')\n \n root.deiconify()\n root.lift()\n root.focus_force()\n \n filenames = tkFileDialog.askopenfilenames(parent=root, title = \"Open file\")\n root.destroy()\n \n return filenames[0]", "def ui_open(*files):\r\n if files:\r\n osname = os.uname()[0].lower()\r\n if not osname in _OPENER_BY_OS:\r\n print('Sorry, open currently not supported for ' + osname)\r\n else:\r\n _OPENER_BY_OS[osname](files)", "def fileBrowserDialog(*args, actionName: AnyStr=\"\", dialogStyle: int=0, fileCommand:\n Script=None, fileType: AnyStr=\"\", filterList: Union[AnyStr,\n List[AnyStr]]=\"\", includeName: AnyStr=\"\", mode: int=0, operationMode:\n AnyStr=\"\", tipMessage: AnyStr=\"\", windowTitle: AnyStr=\"\",\n **kwargs)->AnyStr:\n pass", "def get_input_name():\n xlsTypes = [(\"Файлы Excel или csv\", \".xls .xlsx\")]\n return askopenfilenames(initialdir=os.path.abspath(os.getcwd()), filetypes=xlsTypes, title=\"Выберите файлы Excel или CSV\")", "def open_file(self):\n filepath = askopenfilename(filetypes=[(\"Image Files\", (\"*.jpg\", \"*.png\")), (\"All Files\", \"*.*\")])\n if not filepath:\n return\n return filepath", "def _open_files(view, sel):\n schema, word = get_names(view, sel)\n file_name = word + '.sql'\n path = [schema, None, file_name]\n files = find_file(view.window().folders(), path)\n if len(files) > 5:\n print('something is wrong; too many files; aborting')\n return\n for f in files:\n view.window().open_file(f)", "def on_open(self):\n\n ftypes = [('CSV', '.csv'), ('JSON', '.json'), ('All files', '*')]\n dlg = filedialog.Open(self, filetypes=ftypes)\n\n absolute_file_path = dlg.show()\n \n if absolute_file_path:\n # extract the file name from the absolute path\n file_name = absolute_file_path.split('/')[len(absolute_file_path.split('/')) - 1]\n \n # update the label text\n self.selected_file_name.configure(text=file_name)\n\n self.__set_full_path_of_file(absolute_file_path)\n else:\n # update the label text\n self.selected_file_name.configure(text=\"<Selected file name>\")\n\n self.__set_full_path_of_file(None)", "def open_file_dialog(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n file_name, _ = QFileDialog.getOpenFileName(self,\n \"Select text file\",\n \"\",\n \"Text Files(*);;\",\n options=options)\n if file_name:\n try:\n content = read_file(file_name)\n self.ui.plainTextEdit.setPlainText(\"\".join(content))\n except:\n QMessageBox.question(self, 'Error', \"Chosen file is not text\",\n QMessageBox.Ok | QMessageBox.NoButton)", "def popup(self, title, callfn, initialdir=None, filename=None):\n self.cb = callfn\n filenames = QtGui.QFileDialog.getOpenFileNames(\n self.parent, title, initialdir, filename)\n\n # Special handling for PyQt5, see\n # https://www.reddit.com/r/learnpython/comments/2xhagb/pyqt5_trouble_with_openinggetting_the_name_of_the/\n filenames = filenames[0]\n\n all_paths = []\n for filename in filenames:\n\n # Special handling for wildcard or extension.\n # This is similar to open_files() in FBrowser plugin.\n if '*' in filename or '[' in filename:\n info = iohelper.get_fileinfo(filename)\n ext = iohelper.get_hdu_suffix(info.numhdu)\n files = 
glob.glob(info.filepath) # Expand wildcard\n paths = ['{0}{1}'.format(f, ext) for f in files]\n if self.all_at_once:\n all_paths.extend(paths)\n else:\n for path in paths:\n self.cb(path)\n\n else:\n # Normal load\n if self.all_at_once:\n all_paths.append(filename)\n else:\n self.cb(filename)\n\n if self.all_at_once and len(all_paths) > 0:\n self.cb(all_paths)", "def OnOpenFile( self, event ):\n dialog = wx.FileDialog( self, style=wx.OPEN|wx.FD_MULTIPLE )\n if dialog.ShowModal( ) == wx.ID_OK:\n paths = dialog.GetPaths()\n if self.loader:\n # we've already got a displayed data-set, open new window...\n frame = MainFrame()\n frame.Show( True )\n frame.load( *paths )\n else:\n self.load( *paths )", "def _open_files(self):\n file_names = filedialog.askopenfilenames(initialdir=self.current_directory, title = \"Select file\")\n if(file_names): self.current_directory = os.path.dirname(file_names[0])\n if(len(file_names) == 1):\n file_names = file_names[0]\n return file_names", "def _open_file(self):\n file = QFileDialog.getOpenFileName(self, \"Open file\", \".\")[0]\n if file:\n self.try_add_tab(file)", "def loadPathMenuAction(self):\n logger.info('loadPathMenuAction')\n fname = QtWidgets.QFileDialog.getOpenFileName(self, 'Open file', '',\"Files (*.csv *.xlsx)\")\n fname = fname[0] # fname is a tuple\n print(f'fname: \"{fname}\"')\n if os.path.isfile(fname):\n self.loadPath(fname)", "def import_file(self):\n from tkinter import filedialog\n self.filepath = filedialog.askopenfilenames(\n initialdir=\"/\", title=\"Select file\",\n filetypes=((\"PNG files\", \"*.png\"),\n (\"JPEG files\", \"*.jpeg\"),\n (\"TIFF files\", \"*.tiff\"),\n (\"ZIP files\", \"*.zip\"),\n (\"all files\", \"*.*\")))", "def askOpenMulti(parent,title='',defaultDir='',defaultFile='',wildcard='',style=wx.OPEN|wx.MULTIPLE):\r\n return askOpen(parent,title,defaultDir,defaultFile,wildcard,style )", "def FileDialog( message, wildcard, style, defaultDir=os.getcwd(), defaultFile='' ):\n dlg = wx.FileDialog( wx.GetApp().GetTopWindow(), message, defaultDir, defaultFile, wildcard, style )\n if dlg.ShowModal() == wx.ID_OK:\n if style & wx.MULTIPLE:\n result = dlg.GetPaths()\n else:\n result = dlg.GetPath()\n else:\n result = False\n dlg.Destroy()\n \n return result", "def filepicker():\n import tkinter as tk\n from tkinter import filedialog\n\n root = tk.Tk()\n root.withdraw()\n\n file_path = filedialog.askopenfilename()\n return file_path", "def browse_1(self):\r\n file = QFileDialog()\r\n filter_name = \"Csv files (*.csv);;Text files (*.txt);;Xls files (*.xls);; Xlsx files (*.xlsx)\"\r\n file.setNameFilter(filter_name)\r\n if file.exec():\r\n filenames = file.selectedFiles()\r\n self.browseLine.setText(str(filenames[0]))", "def runOpenFileDialog(\n self,\n c: Cmdr,\n title: str,\n filetypes: list[str],\n defaultextension: str='',\n multiple: bool=False,\n startpath: str=None,\n ) -> Union[list[str], str]: # Return type depends on the evil multiple keyword.\n if g.unitTesting:\n return ''\n\n # 2018/03/14: Bug fixes:\n # - Use init_dialog_folder only if a path is not given\n # - *Never* Use os.curdir by default!\n if not startpath:\n # Returns c.last_dir or os.curdir\n startpath = g.init_dialog_folder(c, c.p, use_at_path=True)\n filter_ = self.makeFilter(filetypes)\n dialog = QtWidgets.QFileDialog()\n self.attachLeoIcon(dialog)\n func = dialog.getOpenFileNames if multiple else dialog.getOpenFileName\n if c:\n try:\n c.in_qt_dialog = True\n val = func(parent=None, caption=title, directory=startpath, filter=filter_)\n finally:\n 
c.in_qt_dialog = False\n else:\n val = func(parent=None, caption=title, directory=startpath, filter=filter_)\n # This is a *PyQt* change, not a Qt change.\n val, junk_selected_filter = val\n if multiple:\n files = [g.os_path_normslashes(s) for s in val]\n if c and files:\n c.last_dir = g.os_path_dirname(files[-1])\n # A consequence of the evil \"multiple\" kwarg.\n return files\n s = g.os_path_normslashes(val)\n if c and s:\n c.last_dir = g.os_path_dirname(s)\n return s", "def open_file(self): # need to fix this to open in a new window\n\t\tself.file_path = filedialog.askopenfilename()\n\t\tf = open(self.file_path)\n\t\tfreader = f.read()\n\t\tself.textBox.insert(END, freader)", "def OpenSpecificFiles(name, extensions):\n \n wildcard = create_wildcard(name, extensions)\n\n afile = \"\"\n dlg = wx.FileDialog(None, \n \"Select a file\", \n os.getcwd(), \n \"\", \n wildcard, \n wx.FD_OPEN | wx.FD_CHANGE_DIR)\n if dlg.ShowModal() == wx.ID_OK:\n afile = dlg.GetPath()\n\n dlg.Destroy()\n return afile", "def selectFile(title=\"Select image\", initialdir=None, multiple=False):\r\n file = filedialog.askopenfilename(\r\n initialdir=initialdir,\r\n multiple=multiple,\r\n title=title\r\n )\r\n return file", "def _filename_multi(self):\n logger.debug(\"Popping Filename browser\")\n return filedialog.askopenfilenames(**self._kwargs)", "def onOpenMenu(self, item):\n self.dialog = pyui.dialogs.FileDialog(os.getcwd(), self.onOpenChosen, \".*stk\")\n self.dialog.doModal()\n return 1", "def _open_file(self, path):\n path = os.path.normpath(os.path.abspath(path))\n while True:\n dialog = self._app.window(class_name='#32770')\n dialog.wait('ready')\n\n # If asked whether to save changes, say no\n try:\n dialog_text = dialog.StaticWrapper2.element_info.name\n if 'Save it?' 
in dialog_text:\n dialog.Button2.click()\n continue\n except MatchError:\n pass\n break\n\n dialog.Edit1.set_edit_text(path)\n dialog.Edit1.send_keystrokes('{ENTER}')\n self.wait_ready(timeout=60)", "def __openFiles(self):\n # set the cwd of the dialog based on the following search criteria:\n # 1: Directory of currently active editor\n # 2: Directory of currently active project\n # 3: CWD\n import QScintilla.Lexers\n fileFilter = self._getOpenFileFilter()\n progs = E5FileDialog.getOpenFileNamesAndFilter(\n self.ui,\n QCoreApplication.translate('ViewManager', \"Open files\"),\n self._getOpenStartDir(),\n QScintilla.Lexers.getOpenFileFiltersList(True, True),\n fileFilter)[0]\n for prog in progs:\n self.openFiles(prog)", "def choose_file(self):\n pass", "def fileOpenStart(pathToFile=None):\n wildcard = \"All files (*.*)|*.*|\"\\\n \"Merged and preprocessed _iso_res.csv file (*_iso_res.csv)|*_iso_res.csv|\"\n if pathToFile is None: \n app = wx.App(False) # Create a new app, don't redirect stdout/stderr to a window.\n frame = wx.Frame(None, wx.ID_ANY, \"\") # A Frame is a top-level window.\n dlg = wx.FileDialog(frame,\n message=\"Choose a file\",\n defaultFile=\"\",\n wildcard=wildcard,\n style=wx.OPEN | wx.CHANGE_DIR\n )\n if dlg.ShowModal() == wx.ID_OK:\n pathToFile = dlg.GetPaths()[0]\n\n hold = pathToFile\n dp = '/'.join(hold.split('/')[:-1])+'/'\n fn = hold.split('/')[-1]\n print \"opening file : \" + pathToFile + \"...\"\n [df, p, vl] = openFile(pathToFile)\n return [df, dp, fn, p, vl]\n frame.Destroy()\n app.Destroy()\n dlg.Destroy()", "def askopenfilename():\r\n file_opt = options = {}\r\n options['defaultextension'] = '.csv'\r\n options['filetypes'] = [('all files', '.*'), ('csv files', '.csv')]\r\n options['initialdir'] = os.getcwd()\r\n options['initialfile'] = 'profile.csv'\r\n options['title'] = 'choose file'\r\n\r\n # get filename\r\n filename = tkFileDialog.askopenfilename(**file_opt)\r\n\r\n # open file on your own\r\n return filename", "def choosefile(self, diagtitle):\r\n root = Tk()\r\n root.withdraw()\r\n sfile = tkFileDialog.askopenfilename(\r\n parent=root,\r\n filetypes = [('.TXT files', '.txt')],\r\n title=diagtitle )\r\n return sfile", "def on_File1_toolButton_clicked(self):\n my_file = QtWidgets.QFileDialog.getOpenFileName(self, u'打开文件', '/')\n if my_file[0]:\n self.File1_lineEdit.setText(my_file[0])\n else:\n QtWidgets.QMessageBox.warning(self, u'警告', u'请选择输入文件')", "def openData(self):\n\n\n path = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', os.getcwd(), 'CSV, XLSX(*.csv *.xlsx)')\n\n # If a file was specified, load it up. 
If not, tell the user to pick a valid file\n if path[0] != '':\n\n if os.path.exists(path[0]) and os.path.getsize(path[0]):\n\n filepath, filename = os.path.split(path[0])\n pandaData = procedures.load(filename, filepath)\n\n self.createTab(pandaData, name=filename)\n\n else:\n self.notifyUser(\"Please pick a valid file.\")", "def askopenfilename(self, *args, **kw):\n\n self.tk.tk_setPalette('#888888')\n save_update_step = self.update_step\n self.update_step = 0\n\n filename = tkinter.filedialog.askopenfilename(parent=self.tk)\n if filename:\n self.readwtf(filename)\n self.redraw_letters()\n self.update_step = save_update_step\n self.tk.tk_setPalette('#000000')", "def getFile():\n from tkinter import Tk, filedialog\n Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing\n return(filedialog.askopenfilenames())", "def OpenSWIFile():\n global bpa_file\n SWIfilename = tkinter.filedialog.askopenfilename(filetypes = [('SWI', '.swi')])\n if SWIfilename != '':\n label1.config(text = \"您选择的文件是:\" + SWIfilename)\n bpa_file = open(SWIfilename,'r',errors = \"ignore\")\n else:\n label1.config(text = \"您没有选择任何文件\")\n\n return", "def _launch_file_b(self):\n types = [\n (\"JPG\", \"*.jpg\"),\n (\"Bitmap\", \"*.bmp\"),\n (\"PNG\", \"*.png\"),\n (\"GIF\", \"*.gif\"),\n (\"All files\", \"*\")]\n dialog = tkFileDialog.Open(self, filetypes = types)\n self._file_path = dialog.show()\n\n self._file_name = self._scrub_name(self._file_path)\n self._move_img()\n return self._file_name", "def choosefile():\r\n\r\n # get filename\r\n filename = tkFileDialog.askopenfilename(**options)\r\n #print filename, '*****'\r\n\r\n # open file on your own\r\n if filename:\r\n #return open(filename, 'r')\r\n tasks.upload_chosen = filename", "def OpenTOLNFile():\n global TOLNpath\n TOLNpath = tkinter.filedialog.askopenfilename(filetypes = [('TXT', '.txt')])\n if TOLNpath != '':\n label3.config(text = \"您选择的文件是:\" + TOLNpath)\n else:\n label3.config(text = \"您没有选择任何文件\")\n #TOLN_file = open(TOLNfilename,'r',errors = \"ignore\")\n return", "def open_slot(self):\n caption = 'Open files'\n directory = './'\n filter_mask = \"JPEG File Interchange Format (*.jpg *.jpeg *jfif)|\" + \"*.jpg;*.jpeg;*.jfif\"\n files = QFileDialog.getOpenFileNames(None, caption, directory, filter_mask)[0]\n self._model.set_filenames(files)\n if len(files) > 1:\n self._ui.bt_next.setEnabled(True)\n self._ui.bt_prev.setEnabled(True)\n self._ui.bt_right.setEnabled(True)\n self._ui.bt_left.setEnabled(True)\n elif len(files) == 1:\n self._ui.bt_left.setEnabled(True)\n self._ui.bt_right.setEnabled(True)\n self._ui.bt_next.setEnabled(False)\n self._ui.bt_prev.setEnabled(False)\n else:\n self._ui.bt_left.setEnabled(False)\n self._ui.bt_right.setEnabled(False)\n self._ui.bt_next.setEnabled(False)\n self._ui.bt_prev.setEnabled(False)\n\n self.refresh_images()", "def OpenAnyFiles():\n \n wildcard = create_wildcard(\"All files\", ['*', '*.*'])\n\n files = []\n dlg = wx.FileDialog(None, \n \"Select file(s)\", \n paths.samples, \n \"\", \n wildcard, \n wx.FD_OPEN | wx.MULTIPLE | wx.FD_CHANGE_DIR)\n if dlg.ShowModal() == wx.ID_OK:\n files = dlg.GetPaths()\n\n dlg.Destroy()\n return files", "def openDoc (self):\n fileName = QFileDialog.getOpenFileName(self,\n self.tr(\"Open File\"), \"\", \"All documents (*.%s;*.%s;*.%s;*.%s;*.%s;*.%s;*.%s);;Tests abstract (*.%s);;Tests unit (*.%s);;Tests suite (*.%s);;Tests plan (*.%s);;Tests global (*.%s);;Tests config (*.%s);;Tests data (*.%s)\" %\n ( TestAbstract.TYPE, TestUnit.TYPE, TestSuite.TYPE, 
TestPlan.TYPE, TestPlan.TYPE_GLOBAL, TestConfig.TYPE, TestData.TYPE, \n TestAbstract.TYPE, TestUnit.TYPE, TestSuite.TYPE, TestPlan.TYPE, TestPlan.TYPE_GLOBAL, TestConfig.TYPE, TestData.TYPE) )\n \n # new in v17.1\n if QtHelper.IS_QT5:\n _fileName, _type = fileName\n else:\n _fileName = fileName\n # end of new\n \n if not len(_fileName):\n return\n \n extension = str(_fileName).rsplit(\".\", 1)[1]\n if not ( extension.lower() in [ TestSuite.TYPE, TestPlan.TYPE, TestPlan.TYPE_GLOBAL, TestConfig.TYPE,\n TestData.TYPE, TestUnit.TYPE, TestAbstract.TYPE ] ):\n QMessageBox.critical(self, self.tr(\"Open Failed\") , self.tr(\"File not supported\") )\n return\n \n tmp = str(_fileName).rsplit(\"/\", 1)\n path = tmp[0]\n if len(tmp) > 1:\n _filename = tmp[1].rsplit(\".\", 1)[0]\n else:\n _filename = tmp[0].rsplit(\".\", 1)[0]\n self.newTab( path = path, filename = _filename, \n extension = extension, repoDest=UCI.REPO_UNDEFINED)", "def load_file(self):\n return tkinter.filedialog.askopenfilename(defaultextension=\".txt\")", "def locatefile(self):\r\n dm = DialogManager()\r\n print \"Opening file chooser ...\"\r\n file = dm.choosefile(\"Choose Raw File\")\r\n return file", "def __searchOpenFiles(self):\n self.ui.showFindFilesDialog(self.textForFind(), openFiles=True)", "def file_menu_open_activate(self, widget, data=None):\n self.open_chooser.show()", "def askOpenFile(dirname=\".\"):\n\n import Tkinter,tkFileDialog\n root = Tkinter.Tk()\n file = tkFileDialog.askopenfile(parent=root,mode='rb',title='Choose a file',initialdir=dirname)\n return file", "def openFile(self):\r\n from SXM import FileIO,Data\r\n fname = str(QFileDialog.getOpenFileName(self.widget,self.tr(\"Open File\"), \\\r\n \".\",FileIO.getFilterString(types=(Data.Image,))))\r\n if len(fname) > 0:\r\n root, ext = os.path.splitext(fname)\r\n self.statusBar().showMessage(self.tr(\"Loading data: %1\").arg(fname),2000)\r\n image = FileIO.fromFile(fname)\r\n image.load()\r\n imwin = ImageWindow(self,image)\r\n self.Images.append(imwin)\r\n self.updateImageList()\r\n imwin.windowModality = False\r\n imwin.show()", "def open_file_browser(path: str):\n call(file_browser + [path])", "def openFileExplorer(self, caption=''):\n\n file_path = None\n file_path, idk = QFileDialog.getOpenFileName(caption=caption)\n\n if file_path == '':\n file_path = None\n\n return file_path", "def on_File2_toolButton_clicked(self):\n my_file = QtWidgets.QFileDialog.getOpenFileName(self, u'打开文件', '/')\n if my_file[0]:\n self.File2_lineEdit.setText(my_file[0])\n else:\n QtWidgets.QMessageBox.warning(self, u'警告', u'请选择输入文件')", "def AskFileForOpen(\n message=None,\n typeList=None,\n # From here on the order is not documented\n version=None,\n defaultLocation=None,\n dialogOptionFlags=None,\n location=None,\n clientName=None,\n windowTitle=None,\n actionButtonLabel=None,\n cancelButtonLabel=None,\n preferenceKey=None,\n popupExtension=None,\n eventProc=None,\n previewProc=None,\n filterProc=None,\n wanted=None,\n multiple=None):\n\n raise NotImplementedError(\"AskFileForOpen\")", "def open_irf_file(self):\n self.irf_filename = QtWidgets.QFileDialog.getOpenFileName(self)\n try:\n if \".txt\" in self.irf_filename[0] or \".csv\" in self.irf_filename[0]:\n self.irf_skip_rows_window = SkipRowsWindow()\n self.irf_skip_rows_window.skip_rows_signal.connect(self.open_irf_with_skip_rows_window)\n self.ui.Res_comboBox.setEnabled(True)\n else:\n self.irf_file = read_picoharp_phd(self.irf_filename[0])\n except:\n pass", "def browse_file_dialog():\n root = Tkinter.Tk()\n # Make window 
almost invisible to focus it and ensure directory browser\n # doesn't end up loading in the background behind main window.\n root.withdraw()\n root.overrideredirect(True)\n root.geometry('0x0+0+0')\n root.deiconify()\n root.lift()\n root.focus_force()\n root.update()\n file_path = tkFileDialog.askopenfilename()\n root.destroy()\n if file_path:\n return os.path.normpath(file_path)\n else:\n return file_path", "def _openButton(self):\n #get the specified file\n selected_file = self.view.list.getSelected()\n\n if selected_file:\n self.model.open(selected_file)\n return\n\n #prompt if they really want to open maya\n dialogs = Dialogs(self.view)\n\n msg = 'No file selected!'\n msg += '\\n\\nAre you sure you want to open maya without a file?'\n dialogs.confirmPrompt(msg)\n\n self.model.open()", "def askOpenFileName(parent, title, wc, remember =- 1, filetype = None):\n\tasklist = []\n\tif remember == -1:\n\t\tconf = Configuration.getConfiguration()\n\t\tremember = conf.getConfigItem(\"RememberPath\", \"Paths\")\n\tlastpath = \"\"\n\tftype = wc.split(\"|\")[1]\n\tftype = ftype.split(\".\")[1]\n\tif filetype != None:\n\t\tftype = filetype\n\tif remember:\n\t\tlastpath = conf.getConfigItem(\"LastPath_%s\" % ftype, \"Paths\")\n\t\tif not lastpath:\n\t\t\tlastpath = \".\"\n\tdlg = wx.FileDialog(parent, title, lastpath, wildcard = wc, style = wx.OPEN|wx.MULTIPLE)\n\tif dlg.ShowModal() == wx.ID_OK:\n\t\tasklist = dlg.GetPaths()\n\t\tasklist = map(unicode, asklist)\n\t\tif not asklist:\n\t\t\treturn asklist\n\t\tif remember:\n\t\t\tfilepath = os.path.dirname(asklist[0])\n\t\t\tconf.setConfigItem(\"LastPath_%s\" % ftype, \"Paths\", filepath)\n\t\t\n\tdlg.Destroy() \n\treturn asklist", "def showOpenImageDialog(self, event):\r\n openImageDialog = wx.FileDialog(self, \"Open\",\r\n style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)\r\n if openImageDialog.ShowModal() == wx.ID_CANCEL:\r\n return\r\n self.setImage(openImageDialog.GetPath())", "def openFile(self, index):\n page_name = index.data().toString()\n file_name = self.file_names[str(page_name)]\n self.main_help_window.setHtml(open(file_name, 'r').read())", "def open_file(self):\n try:\n filename = tkFileDialog.askopenfilename()\n file = open(filename)\n self.image_window.status.config(text='Opened: ' + filename)\n return file\n except:\n self.status.config(text='You fool!')\n tkMessageBox.showwarning(\"Open file\",\n \"Cannot open file \" + filename)\n return None", "def browse( self ):\n Tk.Tk().withdraw()\n filename = askopenfilename( initialdir = self.initialdir,\n title = self.title ,\n filetypes = self.filetypes )\n\n if filename == \"\":\n return\n\n self.set_text( filename )\n #rint( f\"get_text = {self.get_text()}\", flush = True )", "def open_file(self):\n files = [('Text Document', '*.txt'), ('PDF Document', '*.pdf'), ('Word Document', '*.docx')]\n text_file = askopenfile(mode='r', title=\"Open your file\", filetypes=files,\n defaultextension=files)\n if text_file is not None:\n self.file_path = text_file.name\n text_inside = self.file.load_file(text_file.name)\n text_file.close()\n self.textbox.delete(\"1.0\", tk.END)\n self.textbox.insert(\"1.0\", text_inside)\n self.text = self.textbox", "def prompt_open(file_name, file_mode):\n\n\tif file_mode.lower() != \"r\" and file_mode.lower() != \"w\":\n\t\tprint(\"That is not a mode the file can be opened in\")\n\t\treturn \"-1\"\n\n\ttry:\n\t\tfile = open(file_name, file_mode)\n\t\treturn file\n\texcept:\n\t\tprint(\"Sorry that's not a valid file\")\n\t\treturn \"-1\"", "def get_file_dialog(*, 
defaultpath=None, extensionfilter=None):\n qapp = QApplicationStarter()\n kwargs = {'directory': defaultpath,\n 'filter': extensionfilter}\n directory = QtGui.QFileDialog.getOpenFileName(\n None, \"Choose a file\", **kwargs)\n return directory", "def select_files():\n root = Tk()\n root.withdraw()\n root.wm_attributes('-topmost', 1)\n files = askopenfilenames(parent=root,\n title=\"Select file\",\n filetypes=((\"Image files\", '*' + ';*'.join(supported_extensions)), (\"all files\", \"*.*\"))\n )\n return root.tk.splitlist(files)", "def openInputFile(self):\r\n\r\n filename = self.ui.inputFilenameLineEdit.text()\r\n if not os.path.isfile(filename):\r\n QMessageBox.warning(self, \"Cannot open input file\", \"The input file does not exist\")\r\n return\r\n QDesktopServices.openUrl(QUrl.fromLocalFile(filename))", "def openFile(self):\n\n file_path = self.openFileExplorer()\n\n if file_path is not None:\n file_name = self.getFileNameFromPath(file_path)\n print('open file')\n\n self.add_new_tab(file_path=file_path, file_name=file_name)", "def _filename(self):\n logger.debug(\"Popping Filename browser\")\n return filedialog.askopenfilename(**self._kwargs)", "def open_app(self, event=None):\n if not self.ask_save():\n return\n default_path = os.path.dirname(common.root.filename or \"\") or self.cur_dir\n infile = wx.FileSelector(_(\"Open file\"),\n wildcard=\"wxGlade files (*.wxg)|*.wxg|wxGlade Template files (*.wgt)|*.wgt|\"\n \"XML files (*.xml)|*.xml|All files|*\",\n flags=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST, default_path=default_path)\n if not infile: return\n self._open(infile)", "def cb_open(self, button):\n print(\"Open File callback\")\n dialog = Gtk.FileChooserDialog(\n title=\"Please choose a file\", \n parent=self, \n action=Gtk.FileChooserAction.OPEN\n )\n dialog.add_buttons(\n Gtk.STOCK_CANCEL,\n Gtk.ResponseType.CANCEL,\n Gtk.STOCK_OPEN,\n Gtk.ResponseType.OK,\n )\n\n self.add_filters(dialog)\n\n response = dialog.run()\n if response == Gtk.ResponseType.OK:\n print(\"Open clicked\")\n print(\"File selected: \" + dialog.get_filename())\n self.filename = dialog.get_filename()\n if TESTING:\n # Testing. Place a time stamp into the file each time it is opened.\n # E.g. 
'Fri May 7 16:46:41 2021'\n with open(self.filename, \"a\") as fout:\n fout.write(\"Opened: \" + time.ctime() + \"\\n\") \n \n \n elif response == Gtk.ResponseType.CANCEL:\n print(\"Cancel clicked\")\n\n dialog.destroy()", "def buttonClick(self):\n \n self.fpath=filedialog.askopenfilename()\n self.label_fpath.config(text=self.fpath)\n self.err_label.config(text='')\n pass", "def openFile(self, path=None):\n if not path:\n dialog = OpenDialog()\n dialog.set_folders_only(False)\n path = dialog.getOpenFileName(\n self,\n \"Open File\",\n '',\n \"ReStructuredText Files (*.rst *.txt)\"\n )\n\n if path:\n file_path = Path(path[0])\n filename = file_path.name\n tree_dir = file_path.parent.absolute()\n self.handleFileChanged(tree_dir, filename)", "def select_file():\n filename = filedialog.askopenfilename(\n initialdir=os.getcwd(), title=\"Select Backup file...\",\n filetypes=((\"JSON Files\", \"*.json\"),\n (\"Text Files\", \"*.txt\"),\n (\"All Files\", \"*.*\")))\n self.init_data(filename)", "def input_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._input_path_var.set(filename)", "def __openFile(self):\n itm = self.findList.selectedItems()[0]\n self.on_findList_itemDoubleClicked(itm, 0)", "def fileDialog2(*args, buttonBoxOrientation: int=0, cancelCaption: AnyStr=\"\", caption:\n AnyStr=\"\", dialogStyle: int=0, fileFilter: AnyStr=\"\", fileMode: int=0,\n fileTypeChanged: Script=None, hideNameEdit: bool=True, okCaption: AnyStr=\"\",\n optionsUICancel: Script=None, optionsUICommit: Script=None, optionsUICommit2:\n Script=None, optionsUICreate: Script=None, optionsUIInit: Script=None,\n returnFilter: bool=True, selectFileFilter: AnyStr=\"\", selectionChanged:\n Script=None, setProjectBtnEnabled: bool=True, startingDirectory: AnyStr=\"\",\n **kwargs)->AnyStr:\n pass", "def fileDialog(self, startingDir, fileFilter='All files (*.*)'):\n\n\t\tdialog = QtWidgets.QFileDialog.getOpenFileName(\n\t\t\tself, self.tr('Files'), startingDir, fileFilter)\n\n\t\ttry:\n\t\t\treturn dialog[0]\n\t\texcept IndexError:\n\t\t\treturn None", "def file_select(self):\r\n # select ui file and change file extension to .py\r\n self.lineEdit_Ui_file_selection.clear()\r\n self.lineEdit_Py_file_name.clear()\r\n options = QtWidgets.QFileDialog.Options()\r\n options |= QtWidgets.QFileDialog.DontUseNativeDialog\r\n self.fileName, _ = QtWidgets.QFileDialog.getOpenFileName(\r\n None,\r\n \"QFileDialog.getOpenFileName()\",\r\n \"\",\r\n \"UI Files (*.ui);;All Files (*)\",\r\n options=options)\r\n py_Filename = self.fileName[:-2]\r\n py_Filename = py_Filename + \"py\"\r\n self.lineEdit_Ui_file_selection.insert( self.fileName )\r\n if self.fileName:\r\n self.lineEdit_Py_file_name.insert( py_Filename )", "def open_file_browser(path):\n if sys.platform == 'win32':\n #subprocess.Popen(['start', path], shell=True)\n os.startfile(path)\n\n elif sys.platform == 'darwin':\n subprocess.Popen(['open', path])\n\n else:\n try:\n subprocess.Popen(['xdg-open', path])\n except OSError:\n logger.error(\"Presumably *nix system xdg-open failed for path: {}\".format(path))" ]
[ "0.75951433", "0.7013304", "0.7009427", "0.6953197", "0.69224757", "0.6823879", "0.669363", "0.6688277", "0.66736335", "0.6672926", "0.66545457", "0.66527754", "0.6618524", "0.65777797", "0.6551159", "0.65394413", "0.65350693", "0.65235656", "0.6490343", "0.6485696", "0.64783317", "0.646859", "0.6425014", "0.64250106", "0.6394995", "0.63840955", "0.6368174", "0.6353769", "0.63432294", "0.62897736", "0.62820596", "0.6255023", "0.6247032", "0.6239598", "0.62280285", "0.62148285", "0.62094986", "0.61979795", "0.6174385", "0.6171658", "0.6160718", "0.61583775", "0.6099598", "0.60914433", "0.6090282", "0.6090011", "0.60891235", "0.60793054", "0.6074349", "0.60624045", "0.6036775", "0.60010374", "0.5996617", "0.5954171", "0.5947476", "0.5935791", "0.5932955", "0.59277976", "0.5907891", "0.5859534", "0.5843071", "0.58375716", "0.5820548", "0.58102006", "0.5809848", "0.5805583", "0.57753605", "0.5774812", "0.5758719", "0.5743834", "0.57253116", "0.57179034", "0.57102615", "0.5710031", "0.56988233", "0.5698323", "0.5695813", "0.5690243", "0.56867796", "0.5684962", "0.5684108", "0.5679847", "0.5678009", "0.5673322", "0.5669584", "0.5666531", "0.56641036", "0.5657305", "0.56504256", "0.5645437", "0.5635203", "0.56296057", "0.5627839", "0.56243056", "0.56139517", "0.5612357", "0.56042093", "0.5588932", "0.5586663", "0.5578299" ]
0.7704559
0
Returns the tuple of paths stored on the class instance
def getPaths(self): return self.pathTuple
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def paths(self):\n return tuple(self._path)", "def get_paths(self):\n paths = []\n for f in dir(self):\n o = getattr(self, f)\n if callable(o) and hasattr(o, '_path'):\n paths.append(getattr(o, '_path'))\n return paths", "def get_class_paths(_class, saltclass_path):\n straight = os.path.join(saltclass_path, \"classes\", \"{}.yml\".format(_class))\n sub_straight = os.path.join(\n saltclass_path, \"classes\", \"{}.yml\".format(_class.replace(\".\", os.sep))\n )\n sub_init = os.path.join(\n saltclass_path, \"classes\", _class.replace(\".\", os.sep), \"init.yml\"\n )\n return straight, sub_init, sub_straight", "def get_paths(self):\n return self.paths", "def paths(self):\r\n return self._paths", "def paths(self) -> Paths:\n return self._paths", "def get_paths(self):\n return (self.world_fpath, self.subj_fpath, self.peds_fpath)", "def GetPaths(self):\n return self.paths", "def paths(self):\n return self._paths", "def paths(self):\n return self._paths", "def path(self) -> List[Path]:\n return self._path", "def get_path(self) :\n path = [self]\n s = self.get_parent()\n while s is not None :\n path.append(s)\n s = s.get_parent()\n path.reverse()\n return path", "def get_paths(self):\n return self.path.split(',')", "def filepaths(self):\n pass", "def path_entries(self):", "def get_path(self) -> list:\n path = []\n if self.parent:\n path = [a.name for a in self.parent.ancestors(include_self=True)]\n\n return path + [self.name]", "def paths(self):\n return list(zip(*self.collected))[0]", "def path(self):\n if bool(self._path_parameters):\n payload = {inflection.underscore(k): v for k, v, in self._path_parameters.items()}\n else:\n payload = dict()\n PathTuple = namedtuple('PathTuple', sorted(payload))\n the_tuple = PathTuple(**payload)\n return the_tuple", "def getTLDPathsTuple(self, basepath):\n return (basepath, )", "def getPath(obj):", "def _get_paths():\n paths = [\n '/'\n ]\n return paths", "def paths(self):\n return self._visit(self.start)", "def pathMap(self):\n pass", "def path(self):\n ...", "def paths(self) -> typing.Optional[typing.List[str]]:\n return self._values.get('paths')", "def warping_paths(self):\n return self.paths", "def class_path(model, variables):\n return None", "def path(self):\n\t\tif '/' in self.name:\n\t\t\treturn self.name.split(\"/\")\n\t\telse:\n\t\t\treturn self.name.split(\"\\\\\")", "def paths(self):\n rc = []\n for pg in self.path_groups:\n rc.extend(pg.paths)\n return rc", "def __init__(self, paths):\n self.paths = paths", "def get_val_paths(self):\n steps = self.test_structure_to_steps()\n paths = []\n\n for s in steps:\n current_paths = []\n last_class = s[-1]\n for i in range(last_class):\n path = self.base_folder_path + \"/data/validation/val_\" + str(i + 1) + \".tfrecord\"\n current_paths.append(path)\n\n paths.append(current_paths)\n\n return paths", "def get_paths(self,test=False):\n\n if test:\n\n filenames= self.filenames_test\n class_numbers=self.class_numbers_test\n\n test_dir='test/'\n\n else:\n filenames=self.filenames\n class_numbers=self.class_numbers\n\n test_dir=''\n\n for filename,cls in zip(filenames,class_numbers):\n\n #sto creando un generatore variabile(val1,val2,val3,val4)\n path=os.path.join(self.in_dir,self.class_names[cls],test_dir,filename)\n\n #yield è utilizzato quando una funzione ritorna un generatore\n yield path", "def __dir__(self):\n return list(sorted(list(set(dir(type(self)) + self._local_dir()))))", "def get_path(self):\n return self.path", "def getTLDPathsTuple(self, basepath):\n return (basepath, self.tld)", "def 
items(self):\n cur_t_path = self.path_t.__ops__\n return tuple(zip(cur_t_path[1::2], cur_t_path[2::2]))", "def paths_set(self):\n return self._paths_set", "def extract_path(self):\n if self.extracted_path is not None:\n return self.extracted_path\n current = self\n path = []\n while current:\n path.append([current.end, current.path_cost])\n current = current.parent\n return list(reversed(path))", "def get_files_paths(self):\n return self.__files_paths", "def log_paths(self): # pylint:disable=function-redefined\n return self._log_paths", "def get_all_path(self, conf):\n\t\tpass", "def __iter__(self):\n for path in self._paths: yield autopaths.Path(path.complete_path)", "def get_path_list(self, type_str=None):\n return list(\n reversed(\n [v.label_str for v in self.parent_gen if type_str in (None, v.type_str)]\n )\n )", "def __dir__(self) -> List[str]:\n return [*self.factories, *super().__dir__()]", "def get_paths(args):\n log, rest = get_log_path(args)\n out, _ = get_out_path(args)\n temp, _ = get_temp_path(args)\n return log, out, temp, rest", "def __dir__(self):\n dirlist = dir(self.__class__)\n for term in self:\n dirlist.append(term.name)\n\n return dirlist", "def path( self ) :\n\n return( self.__path )", "def path(self):\r\n raise NotImplementedError()", "def _get_classpath(self):\n \n classpath = []\n for path in self.paths:\n if os.path.isdir(path):\n for jar in os.listdir(path):\n if jar.endswith('.jar'):\n path = os.path.join(path, jar)\n classpath.append(os.path.normpath(path))\n else:\n classpath.append(os.path.normpath(path))\n return \":\".join(classpath)", "def get_instances_of_class(cls, folder):\n data = list()\n for _, _, filenames in os.walk(folder):\n for filename in filenames:\n if filename.endswith(\".jpg\"):\n last = filename.split(\"/\")[-1]\n if re.match(cls, last):\n data.append(last)\n return data", "def getPaths(self):\n\n trafficEndPoints = []\n # A job denotes a traffic flow, which corresponds to an iperf task.\n for job in self.config.trace.jobs:\n trafficEndPoints.append((job['src'], job['dst']))\n\n # Obtain details about user-specified non-default links.\n configuredLinks = []\n for linkInfo in self.config.topoData['linkInfos']:\n configuredLinks.append((linkInfo['src'], linkInfo['dst']))\n\n paths = None\n spec = self.config.topoData['flowSpec']\n if spec == 'shortest_path':\n # export paths info and create routing conf using shortest paths\n adjFile = self.config.adjacencyFile\n writeAdjList(self.net, adjFile)\n info(\"**** [G2]: adjacency list written to file\", adjFile, \"\\n\")\n\n outfile = os.path.join(self.config.outPath, SHORTEST_PATH_FILE)\n paths = generateShortestPaths(adjFile, outfile, trafficEndPoints, configuredLinks)\n info(\"**** [G2]: shortest paths written to file\", outfile, \"\\n\")\n # Note: Since there can be multiple shortest paths between two endpoints, solution could vary.\n elif \".json\" in spec:\n info(\"**** [G2]: reading path info from\", spec, \"\\n\")\n paths = readFromPathFile(spec)\n else:\n paths = None\n return paths", "def as_pathlib(self):\n return Path(self.absolute)", "def current_paths(self) -> Iterator[List[Path]]:\n yield self._target_paths", "def __get_path(self):\n return self.path", "def path(self):\n\t\tnode, path_back = self, []\n\t\twhile node:\n\t\t\tpath_back.append(node)\n\t\t\tnode = node.parent\n\t\treturn list(reversed(path_back))", "def get_image_paths(self):\n return self.image_paths", "def get_image_paths(self):\n return self.image_paths", "def storer_paths():\n return [dir_unchecked(), 
dir_checked(),\n dir_backup(), dir_tests()]", "def local_paths(self) -> List[Path]:\n return self._local_paths", "def get_replay_source_helper_paths(self):\n\n if self.replay_source is None:\n return None\n\n paths = []\n classes = self.get_helpers_classes()\n\n base_path = self.base_folder_path + \"/data/replay_images/\" + self.replay_source + \"/\"\n for hc in classes:\n current_paths = []\n for c in hc:\n path = base_path + str(c).zfill(2) + \".tfrecord\"\n current_paths.append(path)\n paths.append(current_paths)\n return paths", "def hypernym_paths(self):\n paths = []\n hypernyms = self._direct_hypernyms\n if self.is_root():\n paths = [[self]]\n for hypernym in hypernyms:\n for ancestor_list in hypernym.hypernym_paths():\n ancestor_list.append(self)\n paths.append(ancestor_list)\n return paths", "def get_path_driver(self) -> set:\n value_path = os.path.join(Folders.folder_main, \n Folders.folder_storage) \n return value_path, os.path.join(value_path, 'chromedriver'), \\\n os.path.join(value_path, self.name_archive)", "def _get_as_path(self):\n return self.__as_path", "def deconstruct(self):\n c = self.__class__\n path = \"{}.{}\".format(c.__module__, c.__name__)\n return path, [self.value], {}", "def getPath(self) -> List['StateNode']:\n rest = []\n if self.previous is not None:\n rest = self.previous.getPath()\n return rest + [self]", "def path_tuples(self):\n # We want the following contents in the tarball\n # Metaflow package itself\n for path_tuple in self._walk(self.metaflow_root, exclude_hidden=False):\n yield path_tuple\n # the package folders for environment\n for path_tuple in self.environment.add_to_package():\n yield path_tuple\n # the user's working directory\n flowdir = os.path.dirname(os.path.abspath(sys.argv[0])) + '/'\n for path_tuple in self._walk(flowdir):\n yield path_tuple", "def paths(self, return_indices=False):\n paths = []\n for tree in self.components():\n paths += self._single_tree_paths(tree, return_indices=return_indices)\n return paths", "def __iter__(self):\n for path_id in self._path_ids:\n yield (path_id, getattr(self, path_id))", "def solution_path(self) -> list[State]:", "def _ClassifyPaths(self, paths):\n arch_paths = []\n obj_paths = []\n bc_paths = []\n for path in paths:\n if path.endswith('.a') or path.endswith('.rlib'):\n # .a files are typically system libraries containing .o files that are\n # ELF files (and never BC files).\n arch_paths.append(path)\n elif bcanalyzer.IsBitcodeFile(os.path.join(self._output_directory, path)):\n # Chromium build tools create BC files with .o extension. 
As a result,\n # IsBitcodeFile() is needed to distinguish BC files from ELF .o files.\n bc_paths.append(path)\n else:\n obj_paths.append(path)\n return _PathsByType(arch=arch_paths, obj=obj_paths, bc=bc_paths)", "def _get_filepaths(self):\n self._printer(str(self.__len__()) + \" file paths have been parsed in \" + str(self.timer.end))\n if self._hash_files:\n return pool_hash(self.filepaths)\n else:\n return self.filepaths", "def cachepath(self):\n return [self.fs.cachepath(uri) for uri in self.uri]", "def path(self):\n p = self\n\n name = [p.name()]\n offsets = set([p._offset])\n while p.has_parent_key():\n p = p.parent_key()\n if p._offset in offsets:\n name.append(\"[path cycle]\")\n break\n name.append(p.name())\n offsets.add(p._offset)\n return '\\\\'.join(reversed(name))", "def locations(self):\n return [part.file for part in self.iterParts() if part]", "def locations(self):\n return [part.file for part in self.iterParts() if part]", "def locations(self):\n return [part.file for part in self.iterParts() if part]", "def path(self):\n # type: () -> string_types\n return self._path", "def getSelectedPaths(self):\n\t\t\n\t\tobjs = self.getSelectedDataUnits()\n\t\treturn [self.dataUnitToPath[x] for x in objs]", "def _get_path_parameters(self) -> Generator[Tuple[str, Type], None, None]:", "def _GetPath(self, kind, id):\n\n return { Database.RESOURCE : self.GetResourcePath,\n Database.TEST : self.GetTestPath,\n Database.SUITE : self.GetSuitePath } [kind] (id)", "def path(self):\n path = self.bidirectional_cpp.getPath()\n # format as list on return as SWIG returns \"tuple\"\n if len(path) <= 0:\n return None\n\n _path = []\n # Convert path to its original types and return\n for p in path:\n if p in [\"Source\", \"Sink\"]:\n _path.append(p)\n else:\n if \"int\" in self._original_node_type.__name__:\n _path.append(int(p))\n elif \"str\" in self._original_node_type.__name__:\n _path.append(str(p))\n return _path", "def path(self):\n node, path_back = self, []\n while node:\n path_back.append(node)\n node = node.parent\n return list(reversed(path_back))", "def path(self):\n node, path_back = self, []\n while node:\n path_back.append(node)\n node = node.parent\n return list(reversed(path_back))", "def path(self) :\n return self.m_path", "def in_filepath_list(class_paths: List[str]) -> List:\n registry, not_founds = build_registry(class_paths)\n builder = FilepathListBuilder()\n source = builder.build(registry)\n\n return [source, not_founds]", "def __dir__(self) -> List[str]:\n self._try_setup()\n return object.__dir__(self) # pytype: disable=attribute-error", "def __fspath__(self):\n raise NotImplementedError", "def frame_paths(frame_type, start_time, end_time, server=None):\n site = frame_type[0]\n connection = datafind_connection(server)\n times = connection.find_times(site, frame_type, \n gpsstart=start_time, \n gpsend=end_time)\n cache = connection.find_frame_urls(site, frame_type, start_time, end_time)\n paths = [entry.path for entry in cache]\n return paths", "def path(self):\n node, path_back = self, []\n while node:\n path_back.append(node.state[0])\n node = node.parent\n return list(reversed(path_back))", "def path(self):\n node, path_back = self, []\n while node:\n path_back.append(node.state)\n node = node.parent\n return list(reversed(path_back))", "def get_all_paths(self):\n seen = set()\n for v in self:\n # v in self returns all nodes in the pathgraph\n if v not in seen:\n # self [v] returns a path containing v. 
If the v does not belong to a path\n # a singleton path [v] is returned\n yield self[v]\n seen.update(self[v])", "def module_path_to_class_path(paths: List[str]) -> List[str]:\n class_paths = list()\n for path in paths:\n # loop all paths\n try:\n # Duck test\n # Given path can be imported by using `import_module`, it's a\n # module path.\n module = import_module(path)\n for member in dir(module):\n # check members\n klass = getattr(module, member)\n if type(klass) == type:\n # Collect class path if the member is a class.\n class_path = klass.__module__ + '.' + klass.__name__\n class_paths.append(class_path)\n\n except ModuleNotFoundError:\n # Oops, the path isn't a module path.\n # That may be a class path.\n class_paths.append(path)\n\n return class_paths", "def get_path_from_root(self):\n if not self.parent:\n return [self]\n return self.parent.get_path_from_root() + [self]", "def paths(self):\n return self._swagger", "def tester_paths():\n return [dir_queue(), dir_tester_unzip_tmp()]", "def path(self) -> str:\r\n path = []\r\n path.append(self._item[\"text\"])\r\n current_item: str = self._id\r\n\r\n while (parent := self._tree.parent(current_item)) != \"\":\r\n tree_item = self._tree.item(parent)\r\n path.append(tree_item[\"text\"])\r\n current_item = parent\r\n\r\n return REGISTRY_PATH_SEPARATOR.join(reversed(path))", "def __dir__(self) -> Iterable[str]:\n return chain(super().__dir__(), dir(self.simulator))", "def get_class_path(instance):\n return '{}.{}'.format(\n instance.__module__,\n instance.__name__ if inspect.isclass(instance) else instance.__class__.__name__)", "def get_train_paths(self):\n\n assert self.dataset_type_first_step == \"sequential\" or self.dataset_type_first_step == \"overlapped\", \\\n \"Error! dataset_type_first_step should be set to 'sequential' or to 'overlapped'\"\n\n assert self.dataset_type_incremental == \"sequential\" or self.dataset_type_incremental == \"overlapped\" or \\\n self.dataset_type_incremental == \"disjoint\", \\\n \"Error! dataset_type_incremental should be set to 'sequential', to 'overlapped' or to 'disjoint'\"\n\n steps = self.test_structure_to_steps()\n paths = []\n\n for s in enumerate(steps):\n if s[0] == 0:\n folder = \"training_first_step\"\n suffix = self.dataset_type_first_step\n else:\n folder = \"training_incremental\"\n suffix = self.dataset_type_incremental\n\n if s[1][0] == s[1][-1]:\n class_id = str(s[1][0])\n else:\n class_id = str(s[1][0]) + \"-\" + str(s[1][-1])\n\n current_path = self.base_folder_path + \"/data/\" + folder + \"/train_\" + suffix + \\\n \"_\" + class_id + \".tfrecord\"\n\n paths.append(current_path)\n\n return paths", "def path(self): # Path taken to reach Goal\n node, path_back = self, []\n while node:\n path_back.append(node)\n node = node.parent\n return list(reversed(path_back))" ]
[ "0.7493626", "0.72214353", "0.71332705", "0.6974973", "0.67577744", "0.6682504", "0.66712487", "0.66641986", "0.66585755", "0.66585755", "0.66517574", "0.6619837", "0.6619723", "0.66154474", "0.65316343", "0.6466854", "0.64451087", "0.63843393", "0.6338825", "0.6330856", "0.6270424", "0.6206903", "0.61377454", "0.6111313", "0.6090677", "0.60639614", "0.60602623", "0.60529786", "0.6050811", "0.6049052", "0.6043993", "0.6005553", "0.5987285", "0.5985881", "0.5956141", "0.593096", "0.5929396", "0.5886092", "0.58487576", "0.58474684", "0.5815357", "0.5782436", "0.57792276", "0.57771486", "0.57755244", "0.57679963", "0.5765251", "0.57492024", "0.5748806", "0.57475424", "0.5740314", "0.5734888", "0.5725781", "0.5724788", "0.571732", "0.5711171", "0.5711171", "0.5710064", "0.57052135", "0.5690648", "0.56884664", "0.5686355", "0.5684595", "0.568421", "0.56765205", "0.5673258", "0.5640728", "0.56296813", "0.56270206", "0.5609951", "0.5608836", "0.5606342", "0.5606139", "0.5602862", "0.5602862", "0.5602862", "0.56020457", "0.5596885", "0.55948365", "0.55781657", "0.55708164", "0.5558445", "0.5558445", "0.5556425", "0.5545314", "0.5538579", "0.5526198", "0.5526125", "0.5523838", "0.55196244", "0.55186343", "0.551105", "0.551079", "0.55091023", "0.5499329", "0.54975176", "0.54867435", "0.5483472", "0.5476369", "0.5472575" ]
0.7340977
1
This function will start the microphone and adjust the minimum energy required to detect a voice based on the ambient noise.
def fine_tune(self, duration = 2): with sr.Microphone() as source: self.recorder.adjust_for_ambient_noise(source, duration=duration) return self.recorder.energy_threshold
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def StartMicrophone(self):\n if not os.path.exists('static'):\n os.mkdir('static')\n microphone = olpc.Microphone('static/sound.ogg')\n microphone.StartMicrophone()", "def speech_recognize_from_microphone():\n speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)\n speech_config.request_word_level_timestamps()\n speech_config.output_format = speechsdk.OutputFormat(1)\n\n speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config)\n\n done = False\n\n def stop_cb(evt):\n \"\"\"callback that signals to stop continuous recognition upon receiving an event `evt`\"\"\"\n print('CLOSING on {}'.format(evt))\n nonlocal done\n done = True\n\n def recognized_cb(evt):\n \"\"\"callback for recognized event\"\"\"\n if evt.result.reason == speechsdk.ResultReason.RecognizedSpeech:\n #print('RECOGNIZED: {}'.format(evt.result.text))\n #print('All params: {}'.format(evt.result))\n #print(evt.result.json)\n response = json.loads(evt.result.json)\n #print('All params: {}'.format(response))\n Text = response[\"DisplayText\"]\n duration = 0;\n for word in response[\"NBest\"][0][\"Words\"]:\n duration += word[\"Duration\"]\n duration = duration / 10000000\n print(\"dur :\"+str(duration)+\" text: \" + Text)\n\n # Connect callbacks to the events fired by the speech recognizer\n speech_recognizer.recognized.connect(recognized_cb)\n speech_recognizer.session_started.connect(lambda evt: print('SESSION STARTED: {}'.format(evt)))\n speech_recognizer.session_stopped.connect(lambda evt: print('SESSION STOPPED {}'.format(evt)))\n speech_recognizer.canceled.connect(lambda evt: print('CANCELED {}'.format(evt)))\n # stop continuous recognition on either session stopped or canceled events\n speech_recognizer.session_stopped.connect(stop_cb)\n speech_recognizer.canceled.connect(stop_cb)\n\n # Start keyword recognition\n speech_recognizer.start_continuous_recognition()\n\n while not done:\n time.sleep(.5)\n\n speech_recognizer.stop_continuous_recognition()", "def m() -> str:\n r = sr.Recognizer()\n with sr.Microphone() as source:\n audio = r.adjust_for_ambient_noise(source)\n logger.info(\"Microphone Active! 
Waiting for prompt!\")\n audio = r.listen(source)\n\n s = r.recognize_google(audio) #Send the audio to google\n result = s.lower()\n return result", "def start_record_microphone(self):\n if not os.path.exists(self.audio_file_folder):\n os.makedirs(self.audio_file_folder)\n\n self.microphone_handler.start_recording()\n self.current_session.put(self.microphone_handler.current_session)", "def mic_input():\n try:\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print('Say something...')\n r.pause_threshold = 1\n r.adjust_for_ambient_noise(source, duration=1)\n audio = r.listen(source)\n try:\n command = r.recognize_google(audio).lower()\n print('You said: ' + command + '\\n')\n except sr.UnknownValueError:\n print('....')\n command = self.mic_input()\n return command\n except Exception as e:\n print(e)\n return False", "def hear_speech():\n print('Please speak and wait...')\n while (1):\n try:\n with sr.Microphone() as source2:\n # print('Please wait while we adjust the surrounding noise.')\n r.adjust_for_ambient_noise(source2, duration=0.2)\n # listens for the user's input\n audio2 = r.listen(source2)\n data = r.recognize_google(audio2)\n\n except sr.UnknownValueError:\n data = 0\n if data != 0:\n print('Recognizing...')\n return data", "def initAudio(self):\n\t\t# Initialize pitch detection\n\t\tself.listener = PitchDetect(channels=1)\n\t\tself.listener.listen()\n\t\tself.recording = False\n\t\tself.paused = False", "def handle_mic_listen(_):\n loop.responsive_recognizer.trigger_listen()", "def start(self):\n try:\n print(\"Initializing driver\")\n pcms = alsaaudio.pcms()\n mixers = alsaaudio.mixers()\n print(f'Available PCMs: {pcms}')\n print(f'Available Mixers: {mixers}')\n self._device = alsaaudio.PCM(device=self._dev_name)\n self._mixer = alsaaudio.Mixer(device=self._dev_name, control=self._mixer_ctrl)\n\n # Unmute if it is muted at first\n if self._mixer.getmute():\n self._mixer.setmute(0)\n except alsaaudio.ALSAAudioError as e:\n print(f\"{type(e).__name__} occured!\")\n print(f\"With message: {e.args}... Failed to initialize Mixer!\")\n self._mixer = None\n except Exception as e:\n print(f\"Something unexpected happend! 
{e}\")\n self._mixer = None", "def init_speech():\n program = True\n while program is True:\n print('Listening...')\n with sr.Microphone() as source:\n audio = r.listen(source)\n\n try:\n command = r.recognize_google(audio)\n print(command)\n except:\n continue\n\n if command in ['quit', 'exit', 'exits', 'exxat', 'bye', 'by' 'good-by', 'goodbye']:\n program = False\n play_audio('./audio/sentnc16.wav')\n break\n\n cmmd.discover(command)", "def run(self) -> None:\n self.microphone.start()\n try:\n self._run()\n finally:\n self.microphone.stop()", "def takecommand():\r\n\r\n r=sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"listening....\")\r\n r.pause_threshold=1\r\n \"\"\" Pause_threshold will let you to speak with your own pace\"\"\"\r\n\r\n #r.energy_threshold=500\r\n \"\"\" energy threshold will stop hindrens from outside\"\"\"\r\n\r\n audio=r.listen(source)\r\n\r\n try:\r\n print(\"In process of recognizing..\")\r\n query=r.recognize_google(audio,language=\"en-in\")\r\n \"\"\" query will take date that has been spoken by user with the help of google API\"\"\"\r\n print(\"you said :\",query)\r\n\r\n except Exception as e:\r\n print(\"can you speak this again\")\r\n return \"none\"\r\n return query", "def setup_audio(self):\n\t\t\n\t\tpath_to_file = '/var/lib/snips/skills/snips_app_pilldispenser/settings/setup_audio.sh'\n\t\tsubprocess.call([path_to_file])", "def start(self):\n while True:\n #requests.get(\"http://localhost:8080/clear\")\n if use_launch_phrase:\n recognizer, audio = self.speech.listen_for_audio()\n if self.speech.is_call_to_action(recognizer, audio):\n self.__acknowledge_action()\n self.decide_action()\n else:\n self.decide_action()", "def process_speak_listen(device_index, mp3_filename, text, record, flag):\n\n mp3_filename = mp3_filename + \".mp3\"\n try:\n tts = gTTS(text=text, lang='en', slow=False)\n tts.save(mp3_filename)\n playsound(mp3_filename)\n os.remove(mp3_filename)\n\n if flag != 1:\n with sr.Microphone(device_index=device_index) as source:\n record.adjust_for_ambient_noise(source, duration=1)\n print(\"Speak:\")\n os.system(\"zenity --progress --width=400 --height=200 --title='Speak Now' \"\n \"--text='Speak Now......No need to click OK button' --no-cancel &\")\n try:\n audio = record.listen(source, timeout=5)\n text = record.recognize_google(audio)\n os.system(\"ps -ef|grep zenity|awk '{print $2}'|head -1|xargs kill -9\")\n print(text)\n except LookupError:\n os.system(\"ps -ef|grep zenity|awk '{print $2}'|head -1|xargs kill -9\")\n print(\"ERROR : LookupError - Could not able to understand\")\n text = None\n except speech_recognition.WaitTimeoutError:\n os.system(\"ps -ef|grep zenity|awk '{print $2}'|head -1|xargs kill -9\")\n print(\"ERROR : WaitTimeoutError - Could not able to listen anything for 5 seconds\")\n text = None\n except speech_recognition.UnknownValueError:\n os.system(\"ps -ef|grep zenity|awk '{print $2}'|head -1|xargs kill -9\")\n print(\"ERROR : UnknownValueError - Could not able to listen anything for 5 seconds\")\n text = None\n except gtts.tts.gTTSError:\n print(\"ERROR : Connection Error : No internet connection.\")\n exit_program()\n except PermissionError:\n print(\"ERROR : No permission\")\n exit_program()\n\n return text", "def takeCommand():\r\n r=sr.Recognizer()\r\n\r\n with sr.Microphone() as source:\r\n print(\"Listening....\")\r\n r.pause_threshold = 1 #pause threshold is if we pause in between speaking it shouldnt consider the sentence as complete\r\n audio = r.listen(source)\r\n\r\n try:\r\n 
print(\"Recognizing...\")\r\n query= r.recognize_google(audio,language='en-in')\r\n print(f\"User said: {query} \\n\")\r\n\r\n except Exception as e:\r\n print(e)\r\n print(\"Please say that again...\")\r\n return \"None\"\r\n\r\n\r\n return query", "def start_audio_relay(self):\n try:\n self.add_audio_client()\n receive_audio_client_socket, address = \\\n self.receive_audio_socket.accept()\n print(\"connected relay audio\")\n name = self.receive_mes(receive_audio_client_socket)\n self.send_chunk(\"calling\".encode(), receive_audio_client_socket)\n while name not in self.client_audio_dict:\n time.sleep(TIME_SLEEP)\n print(\"waiting for the other client to connect\")\n self.send_chunk(\"wait\".encode(), receive_audio_client_socket)\n self.send_chunk(\"start\".encode(), receive_audio_client_socket)\n send_sock = self.client_audio_dict[name]\n self.receive_and_send_audio(receive_audio_client_socket, send_sock)\n except socket.error as e:\n print(\"socket audio relay fail: \", e)\n self.close_all()\n except Exception as e:\n print(\"audio relay exception: \", e)\n self.close_all()", "def audio_microfrontend(audio, sample_rate=16000, window_size=25, window_step=10, num_channels=32, upper_band_limit=7500, lower_band_limit=125, smoothing_bits=10, even_smoothing=0.025, odd_smoothing=0.06, min_signal_remaining=0.05, enable_pcan=False, pcan_strength=0.95, pcan_offset=80, gain_bits=21, enable_log=True, scale_shift=6, left_context=0, right_context=0, frame_stride=1, zero_padding=False, out_scale=1, out_type=_dtypes.uint16, name=None):\n _ctx = _context._context or _context.context()\n tld = _ctx._thread_local_data\n if tld.is_eager:\n try:\n _result = pywrap_tfe.TFE_Py_FastPathExecute(\n _ctx._context_handle, tld.device_name, \"AudioMicrofrontend\", name,\n tld.op_callbacks, audio, \"sample_rate\", sample_rate, \"window_size\",\n window_size, \"window_step\", window_step, \"num_channels\", num_channels,\n \"upper_band_limit\", upper_band_limit, \"lower_band_limit\",\n lower_band_limit, \"smoothing_bits\", smoothing_bits, \"even_smoothing\",\n even_smoothing, \"odd_smoothing\", odd_smoothing,\n \"min_signal_remaining\", min_signal_remaining, \"enable_pcan\",\n enable_pcan, \"pcan_strength\", pcan_strength, \"pcan_offset\",\n pcan_offset, \"gain_bits\", gain_bits, \"enable_log\", enable_log,\n \"scale_shift\", scale_shift, \"left_context\", left_context,\n \"right_context\", right_context, \"frame_stride\", frame_stride,\n \"zero_padding\", zero_padding, \"out_scale\", out_scale, \"out_type\",\n out_type)\n return _result\n except _core._FallbackException:\n try:\n return audio_microfrontend_eager_fallback(\n audio, sample_rate=sample_rate, window_size=window_size,\n window_step=window_step, num_channels=num_channels,\n upper_band_limit=upper_band_limit,\n lower_band_limit=lower_band_limit, smoothing_bits=smoothing_bits,\n even_smoothing=even_smoothing, odd_smoothing=odd_smoothing,\n min_signal_remaining=min_signal_remaining,\n enable_pcan=enable_pcan, pcan_strength=pcan_strength,\n pcan_offset=pcan_offset, gain_bits=gain_bits,\n enable_log=enable_log, scale_shift=scale_shift,\n left_context=left_context, right_context=right_context,\n frame_stride=frame_stride, zero_padding=zero_padding,\n out_scale=out_scale, out_type=out_type, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except (TypeError, ValueError):\n result = _dispatch.dispatch(\n audio_microfrontend, audio=audio, sample_rate=sample_rate,\n window_size=window_size,\n 
window_step=window_step,\n num_channels=num_channels,\n upper_band_limit=upper_band_limit,\n lower_band_limit=lower_band_limit,\n smoothing_bits=smoothing_bits,\n even_smoothing=even_smoothing,\n odd_smoothing=odd_smoothing,\n min_signal_remaining=min_signal_remaining,\n enable_pcan=enable_pcan,\n pcan_strength=pcan_strength,\n pcan_offset=pcan_offset,\n gain_bits=gain_bits, enable_log=enable_log,\n scale_shift=scale_shift,\n left_context=left_context,\n right_context=right_context,\n frame_stride=frame_stride,\n zero_padding=zero_padding,\n out_scale=out_scale, out_type=out_type,\n name=name)\n if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\n return result\n raise\n except _core._NotOkStatusException as e:\n _ops.raise_from_not_ok_status(e, name)\n # Add nodes to the TensorFlow graph.\n if sample_rate is None:\n sample_rate = 16000\n sample_rate = _execute.make_int(sample_rate, \"sample_rate\")\n if window_size is None:\n window_size = 25\n window_size = _execute.make_int(window_size, \"window_size\")\n if window_step is None:\n window_step = 10\n window_step = _execute.make_int(window_step, \"window_step\")\n if num_channels is None:\n num_channels = 32\n num_channels = _execute.make_int(num_channels, \"num_channels\")\n if upper_band_limit is None:\n upper_band_limit = 7500\n upper_band_limit = _execute.make_float(upper_band_limit, \"upper_band_limit\")\n if lower_band_limit is None:\n lower_band_limit = 125\n lower_band_limit = _execute.make_float(lower_band_limit, \"lower_band_limit\")\n if smoothing_bits is None:\n smoothing_bits = 10\n smoothing_bits = _execute.make_int(smoothing_bits, \"smoothing_bits\")\n if even_smoothing is None:\n even_smoothing = 0.025\n even_smoothing = _execute.make_float(even_smoothing, \"even_smoothing\")\n if odd_smoothing is None:\n odd_smoothing = 0.06\n odd_smoothing = _execute.make_float(odd_smoothing, \"odd_smoothing\")\n if min_signal_remaining is None:\n min_signal_remaining = 0.05\n min_signal_remaining = _execute.make_float(min_signal_remaining, \"min_signal_remaining\")\n if enable_pcan is None:\n enable_pcan = False\n enable_pcan = _execute.make_bool(enable_pcan, \"enable_pcan\")\n if pcan_strength is None:\n pcan_strength = 0.95\n pcan_strength = _execute.make_float(pcan_strength, \"pcan_strength\")\n if pcan_offset is None:\n pcan_offset = 80\n pcan_offset = _execute.make_float(pcan_offset, \"pcan_offset\")\n if gain_bits is None:\n gain_bits = 21\n gain_bits = _execute.make_int(gain_bits, \"gain_bits\")\n if enable_log is None:\n enable_log = True\n enable_log = _execute.make_bool(enable_log, \"enable_log\")\n if scale_shift is None:\n scale_shift = 6\n scale_shift = _execute.make_int(scale_shift, \"scale_shift\")\n if left_context is None:\n left_context = 0\n left_context = _execute.make_int(left_context, \"left_context\")\n if right_context is None:\n right_context = 0\n right_context = _execute.make_int(right_context, \"right_context\")\n if frame_stride is None:\n frame_stride = 1\n frame_stride = _execute.make_int(frame_stride, \"frame_stride\")\n if zero_padding is None:\n zero_padding = False\n zero_padding = _execute.make_bool(zero_padding, \"zero_padding\")\n if out_scale is None:\n out_scale = 1\n out_scale = _execute.make_int(out_scale, \"out_scale\")\n if out_type is None:\n out_type = _dtypes.uint16\n out_type = _execute.make_type(out_type, \"out_type\")\n try:\n _, _, _op, _outputs = _op_def_library._apply_op_helper(\n \"AudioMicrofrontend\", audio=audio, sample_rate=sample_rate,\n window_size=window_size,\n 
window_step=window_step,\n num_channels=num_channels,\n upper_band_limit=upper_band_limit,\n lower_band_limit=lower_band_limit,\n smoothing_bits=smoothing_bits,\n even_smoothing=even_smoothing,\n odd_smoothing=odd_smoothing,\n min_signal_remaining=min_signal_remaining,\n enable_pcan=enable_pcan,\n pcan_strength=pcan_strength,\n pcan_offset=pcan_offset, gain_bits=gain_bits,\n enable_log=enable_log, scale_shift=scale_shift,\n left_context=left_context,\n right_context=right_context,\n frame_stride=frame_stride,\n zero_padding=zero_padding, out_scale=out_scale,\n out_type=out_type, name=name)\n except (TypeError, ValueError):\n result = _dispatch.dispatch(\n audio_microfrontend, audio=audio, sample_rate=sample_rate,\n window_size=window_size,\n window_step=window_step,\n num_channels=num_channels,\n upper_band_limit=upper_band_limit,\n lower_band_limit=lower_band_limit,\n smoothing_bits=smoothing_bits,\n even_smoothing=even_smoothing,\n odd_smoothing=odd_smoothing,\n min_signal_remaining=min_signal_remaining,\n enable_pcan=enable_pcan,\n pcan_strength=pcan_strength,\n pcan_offset=pcan_offset, gain_bits=gain_bits,\n enable_log=enable_log, scale_shift=scale_shift,\n left_context=left_context,\n right_context=right_context,\n frame_stride=frame_stride,\n zero_padding=zero_padding, out_scale=out_scale,\n out_type=out_type, name=name)\n if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\n return result\n raise\n _result = _outputs[:]\n if _execute.must_record_gradient():\n _attrs = (\"sample_rate\", _op._get_attr_int(\"sample_rate\"), \"window_size\",\n _op._get_attr_int(\"window_size\"), \"window_step\",\n _op._get_attr_int(\"window_step\"), \"num_channels\",\n _op._get_attr_int(\"num_channels\"), \"upper_band_limit\",\n _op.get_attr(\"upper_band_limit\"), \"lower_band_limit\",\n _op.get_attr(\"lower_band_limit\"), \"smoothing_bits\",\n _op._get_attr_int(\"smoothing_bits\"), \"even_smoothing\",\n _op.get_attr(\"even_smoothing\"), \"odd_smoothing\",\n _op.get_attr(\"odd_smoothing\"), \"min_signal_remaining\",\n _op.get_attr(\"min_signal_remaining\"), \"enable_pcan\",\n _op._get_attr_bool(\"enable_pcan\"), \"pcan_strength\",\n _op.get_attr(\"pcan_strength\"), \"pcan_offset\",\n _op.get_attr(\"pcan_offset\"), \"gain_bits\",\n _op._get_attr_int(\"gain_bits\"), \"enable_log\",\n _op._get_attr_bool(\"enable_log\"), \"scale_shift\",\n _op._get_attr_int(\"scale_shift\"), \"left_context\",\n _op._get_attr_int(\"left_context\"), \"right_context\",\n _op._get_attr_int(\"right_context\"), \"frame_stride\",\n _op._get_attr_int(\"frame_stride\"), \"zero_padding\",\n _op._get_attr_bool(\"zero_padding\"), \"out_scale\",\n _op._get_attr_int(\"out_scale\"), \"out_type\",\n _op._get_attr_type(\"out_type\"))\n _inputs_flat = _op.inputs\n _execute.record_gradient(\n \"AudioMicrofrontend\", _inputs_flat, _attrs, _result)\n _result, = _result\n return _result", "def __init__(self,\n words_per_minute=15.0,\n tone_frequency=500.0,\n sample_rate=11025,\n audio_file_name='morse.wav'):\n self.words_per_minute = words_per_minute\n self.dot_time_in_msec = 0.0\n self.tone_frequency = tone_frequency\n self.sample_rate = sample_rate\n self.sample_period = 1.0 / float(self.sample_rate)\n self.audio_file_name = audio_file_name\n # Buffers to cache synthesized sample data.\n self.pulse_shaping_list = []\n self.dot_sample_buffer = None\n self.dash_sample_buffer = None\n self.silence_4_sample_buffer = None\n self.silence_2_sample_buffer = None\n self.silence_1_sample_buffer = None\n # The main sample buffer.\n self.sample_buffer 
= None\n # Text queue data.\n self.text_queue = queue.Queue()\n self.stop_and_clear_queue = False\n # Set the dot time in milliseconds based on the sending speed.\n self.set_words_per_minute(self.words_per_minute)\n # Initialize the sample buffers.\n self._cache_dot_dash_sample_data()\n self._cache_silence_sample_data()\n # Audio data.\n self.player = None\n self.audio_finished_event = threading.Event()\n self.audio_thread_continue = True\n threading.Thread.__init__(self)\n # The inherited threading.start() methods calls the\n # derived self.run() method in another thread.\n self.start()", "def process_audio(self, voice_rec_array):\n \n if self.use_voice:\n \n voice = voice_rec_array[0]\n \n if self.prev_voice_command != voice:\n \n print(f'voice command: {voice}')\n\n if 'takeoff' in voice:\n self.drone.takeoff() \n print('takeoff')\n\n if 'land' in voice:\n if 'palm' in voice:\n print('palmland')\n self.palm_land_approach()\n\n else:\n self.toggle_tracking(False)\n # self.tracking = False\n self.drone.land()\n self.drone.quit()\n cv2.destroyAllWindows() \n os._exit(0)\n \n \n if 'tracking' in voice:\n if 'no' in voice:\n self.tracking = False\n else:\n self.tracking = True\n\n\n if 'distance' in voice:\n if 'off' in voice:\n self.distance_mode = False\n self.keep_distance = None\n else:\n self.distance_mode = True\n \n\n if 'picture' in voice:\n print('picture in command')\n self.picture_target = re.findall(\"pic_target\\s:\\s'([a-zA-Z\\s]+)\",voice)[0].replace('the ','')\n self.toggle_tracking(tracking=False)\n print('tracking off')\n self.picture_approach = True\n self.target_height = None\n self.search_start_time = time.time()\n \n if 'come' in voice:\n self.rth = True\n\n self.prev_voice_command = voice\n\n # if 'move' in voice:\n # amount = None\n # amount = int(re.findall('[0-9]{2}', voice)[0])\n # print(amount)\n\n # if amount is not None:\n\n # if amount > 30:\n # amount = 30\n # try:\n # if 'forwards' in voice:\n # self.drone.forward(amount)\n \n # if 'backwards' in voice:\n # self.drone.backward(amount)\n\n # if 'left' in voice:\n # self.drone.left(amount)\n\n # if 'right' in voice:\n # self.drone.right(amount)\n\n # self.move_timestamp = time.time()\n\n\n # except:\n # print('not possible')\n \n # self.prev_voice_command = voice", "def main():\n # transcribe_audio()\n summarize()", "def filter_audio(audio):\n\n # Calculate voice energy for every 123 ms block\n apower = lr.amplitude_to_db(np.abs(lr.stft(audio, n_fft=2048)), ref=np.max)\n\n # Summarize energy of every rate, normalize\n apsums = np.sum(apower, axis=0) ** 2\n apsums -= np.min(apsums)\n apsums /= np.max(apsums)\n\n # Smooth the graph for saving short spaces and pauses, remove sharpness\n apsums = np.convolve(apsums, np.ones((9,)), 'same')\n # Normalize again\n apsums -= np.min(apsums)\n apsums /= np.max(apsums)\n\n # Set noise limit to 35% over voice\n apsums = np.array(apsums > 0.35, dtype=bool)\n\n # Extend the blocks every on 125ms\n # before separated samples (2048 at block)\n apsums = np.repeat(apsums, np.ceil(len(audio) / len(apsums)))[:len(audio)]\n\n return audio[apsums]", "def prepare_media(self):\n start=self.get_pref('starttime')\n stop=self.get_pref('stoptime')\n## volume=limit(self.get_pref('volume'),0,200) # use limit() to ensure that volume pref is sensible...\n m=self.instance.media_new(self.mrl())\n# NONE of the following options work\n# m=self.instance.media_new(self.mrl(),'gain=0.2')\n## m=self.instance.media_new(self.mrl(),'sout-raop-volume=%s' % volume)\n# 
m=self.instance.media_new(self.mrl(),'audio-replay-gain-mode=track','--audio-replay-gain-default=0.2')\n if start:\n m.add_options('start-time=%s' % start) \n if stop:\n m.add_options('stop-time=%s' % stop) \n# the following test code DOES NOT WORK, though it does in cvlc at the command line, eg > cvlc my.mp3 --gain=0.2\n# gain=\"1.5\"\n# print \"SETTING GAIN for %s at %s%%\" % (self.uid,gain)\n# m.add_option('gain=%s' % gain)\n return m", "def _recognise_speech() -> None:\n recogniser: Recogniser = SpeechRecogniser(\n JackRobot(\n SpeechEngine(\n )\n )\n )\n\n while True:\n recogniser.run()", "def start_soundtrack(self):\n sources = screens['Intro']['music']\n self.source = choice(sources)\n Logger.info('Chose \"{}\" as the intro music.'.format(self.source))\n try:\n SoundManager.music[self.source]\n except KeyError:\n SoundManager.add_music(self.source, self)\n SoundManager.play_music(self.source)", "def main():\r\n\r\n ### Choose and Import File\r\n\r\n inSound = Sound()\r\n\r\n rate = inSound.rate\r\n data = inSound.data\r\n dataLength = len(data)\r\n \r\n info = inSound.get_info()\r\n head, filename = os.path.split(info[0]) # get filename of input\r\n \r\n # Decide output directory and filename\r\n outDir = r'out'\r\n outFile = os.path.join(outDir, 'out_'+filename)\r\n\r\n # Check if data has multiple channels, if yes use only one\r\n if(len(data.shape) > 1):\r\n data = data[:,0]\r\n\r\n\r\n ### Set All Parameters\r\n\r\n #get parameters from user dialogue\r\n params = getParameters()\r\n\r\n numChannels = params['numChannels'][0] # number of Channels\r\n loFreq = params['loFreq'][0] # lower bound on frequencies\r\n hiFreq = params['hiFreq'][0] # upper bound on frequencies\r\n plotChannels = params['plotChannels'][0] # if it should plot the Gammatone channels\r\n block_time = params['block_time'][0] # in ms\r\n block_shift = params['block_shift'][0] # in ms\r\n selectChannels = params['selectChannels'][0] # number of channels to activate at a single time\r\n\r\n\r\n ### Filter input file\r\n\r\n filtered, channel_fs = filterDataGamaTone(data, rate, numChannels, loFreq, hiFreq, plotChannels)\r\n\r\n\r\n ### Gammatones -> Stimulation Amplitude for time block\r\n\r\n samples_in_block = np.floor(block_time * rate / 1000).astype('int')\r\n samples_in_shift = np.floor(block_shift * rate / 1000).astype('int')\r\n\r\n summed = gammatoneToAmplitude(filtered, samples_in_block, samples_in_shift)\r\n\r\n # only activate the n electrodes that have the largest stimulation\r\n amps = n_largest_channels(summed, n=selectChannels)\r\n\r\n \r\n #### Sound reconstruction\r\n\r\n # for each timeblock we need to duplicate enough samples to fill it at sample rate\r\n amps_samples = np.repeat(amps, samples_in_shift, axis=1)\r\n #trim end to get same length as input\r\n amps_samples = amps_samples[:,:dataLength] \r\n\r\n # from amplitude samples and frequencies, reconstruct sound\r\n res_data = generateSound(amps_samples, channel_fs, rate)\r\n\r\n\r\n ### Write to output file\r\n write(outFile, rate, res_data)\r\n print('Wrote file to: \\n' + outFile)", "def takeCommand():\n r = sr.Recognizer()\n with sr.Microphone() as source: #don't forget the () after microphone\n print(\"Listening ...\")\n r.pause_threshold = 1\n audio = r.listen(source)\n\n try:\n print(\"Recognizing..\")\n query = r.recognize_google(audio, language='en-in')\n print(f\"User said: {query}\\n\")\n\n except Exception as e:\n print(e)\n print(\"Say that again please..\")\n return \"None\"\n return query", "def voice_recognizer():\n while 
dr.ttsIsSpeaking().result or dr.mediaIsPlaying().result:\n time.sleep(1)\n return dr.recognizeSpeech().result", "def act(self, audio_file=None):\n #file as source\n if self.src == 'file':\n if audio_file is None:\n raise ValueError(\"Please provide a audio_file\")\n return None\n elif not os.path.exists(audio_file):\n raise FileNotFoundError(\"Specified file not found\")\n return None\n else:\n file = speech_recognition.AudioFile(audio_file)\n with file:\n speech = self.recog_obj.record(file)\n \n #mic as source\n elif self.src == 'mic':\n if audio_file is not None:\n print(\"WARNING: source is set to device microphone. Audio file will be ignored\\n\")\n \n try:\n with self.mic_obj:\n print(\"Speak into the mic....\\n\")\n self.recog_obj.adjust_for_ambient_noise(self.mic_obj)\n speech = self.recog_obj.listen(self.mic_obj)\n #if microphone is not detected\n except OSError:\n print(\"Error: Microphone not detected\")\n return None\n \n \n try:\n print(\"Please wait while we transcribe...\\n\")\n text = self.recog_obj.recognize_google(speech, language='en', show_all=self.debug)\n \n #if audio is not detected\n except speech_recognition.UnknownValueError:\n print(\"Error: Sorry audio not detected by device microphone\")\n return None\n \n #if there is connection issue or api issue\n except speech_recognition.RequestError:\n print(\"Error: API for transcription is not reachable. There may be some connection issue or server side issue\")\n return None\n \n #for imposing various rules to text \n #But if debug mode is enabled, transcript variable will store a dictionary of various transcriptions \n #along with their confidence probabilities, so conversion rules are disabled meanwhile \n transcript = self.tcr.deconcat(text) if not self.debug else text\n return transcript", "def handle_audio_input(message):\n def build_context(msg: Message):\n ctx = {'client_name': 'mycroft_listener',\n 'source': msg.context.get(\"source\" or \"speech_api\"),\n 'destination': [\"skills\"],\n \"audio_parser_data\": msg.context.get(\"audio_parser_data\"),\n \"client\": msg.context.get(\"client\"), # origin (local, klat, nano, mobile, api)\n \"neon_should_respond\": msg.context.get(\"neon_should_respond\"),\n \"username\": msg.context.get(\"username\"),\n \"timing\": {\"start\": msg.data.get(\"time\"),\n \"transcribed\": time.time()},\n \"ident\": msg.context.get(\"ident\", time.time())\n }\n if msg.context.get(\"klat_data\"):\n ctx[\"klat_data\"] = msg.context(\"klat_data\")\n ctx[\"nick_profiles\"] = msg.context.get(\"nick_profiles\")\n return ctx\n\n ident = message.context.get(\"ident\") or \"neon.audio_input.response\"\n wav_file_path = message.data.get(\"audio_file\")\n lang = message.data.get(\"lang\")\n try:\n _, parser_data, transcriptions = _get_stt_from_file(wav_file_path, lang)\n message.context[\"audio_parser_data\"] = parser_data\n context = build_context(message)\n data = {\n \"utterances\": transcriptions,\n \"lang\": message.data.get(\"lang\", \"en-us\")\n }\n handled = _emit_utterance_to_skills(Message('recognizer_loop:utterance', data, context))\n bus.emit(message.reply(ident, data={\"parser_data\": parser_data,\n \"transcripts\": transcriptions,\n \"skills_recv\": handled}))\n except Exception as e:\n LOG.error(e)\n bus.emit(message.reply(ident, data={\"error\": repr(e)}))", "def __init__(self, audio_fname, good_range=None, **kwargs):\n # Some parameters\n self.sampling_rate = 44100 # This is essentia's default\n self.stroke_length = kwargs.get('stroke_length', 0.5) # In seconds\n self.clip_start = 
kwargs.get('clip_start', True) # In seconds\n self.clip_end = kwargs.get('clip_end', True) # In seconds\n\n # Getting the audio signal\n self.audio_fname = audio_fname\n # Following is an audio signal sampled in 44100Hz (essentia default)\n self.audio = MonoLoader(filename=audio_fname)()\n\n # Cleaning edges\n try:\n self.audio = self.audio[good_range[0]:good_range[1]]\n except:\n pass\n\n # clipping\n self.audio_thd = 0.05\n self.beginning_buffer = 1 # in seconds\n if self.clip_start:\n clipped_start = np.argmax(self.audio>self.audio_thd) - self.beginning_buffer*self.sampling_rate\n clipped_start = max(0, clipped_start)\n self.audio = self.audio[clipped_start:-1]\n\n if self.clip_end:\n reversed_audio = self.audio[::-1]\n clipped_end = len(reversed_audio) - np.argmax(reversed_audio>self.audio_thd) - 1 + self.beginning_buffer*self.sampling_rate\n self.audio = self.audio[:clipped_end]\n\n # Some parameter that will be defined by signal processing\n self.onset_times = False # In seconds\n self.onset_samples = False # As sample number in the audio sampling\n self.strokes = False\n self.stroke_df = False\n self.feature_table = False", "def voice_increase():\n request_command(tv_command=TVCommand.voice_increase)", "def on_vader_start(ob, message):\n text='\"Please start speaking\"'\n subprocess.call('espeak '+ text, shell=True)\n logging.debug(\"Listening...\")", "def start(self):\n self.kb_client.subscribe(self.kb_ID, {\"_data\": {\"tag\": TAG_ANSWER, \"text\": \"$input\", \"timestamp\": \"$time\", \"language\": \"$lang\"}}, self.add_emotion) # from the 'gnlp' module", "def record_audio():\n voiceObj = voice_rec()\n text = voiceObj.start() \n return text", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, input_device_index=0, channels=1, rate=RATE, input=True, output=True, frames_per_buffer=CHUNK_SIZE)\n num_silent = 0\n snd_started = False\n\n r = array('h')\n while 1:\n snd_data = array('h', stream.read(CHUNK_SIZE, exception_on_overflow = False))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n print(\"Sound started.\")\n snd_started = True\n\n if snd_started and num_silent> 10:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n #r = trim(r)\n #r = add_silence(r, 0.5)\n return sample_width, r", "def run(self):\n self.__engine = engine = pyttsx.init()\n if self.__voice_id is not None:\n engine.setProperty('voice', self.__voice_id)\n engine.connect('finished-utterance', self.__next_utterance)\n engine.say('Starting voice process')\n engine.startLoop()", "def takecommand():\n r = src.Recognizer()\n with src.Microphone() as source:\n print(\"Listening...\")\n r.pause_threshold = 1\n audio = r.listen(source)\n\n try:\n print(\"Recognizing...\")\n query = r.recognize_google(audio, language='en-in')\n print(f\"user said: {query}\")\n\n except Exception as e:\n speak(\"Sorry, Can You repeat this please\")\n query = None\n return query\n return query", "def start_recording(self) -> None:\n # Clear the internal ring buffer.\n self._buffer.fill(0)\n\n # Start recording using sounddevice's InputStream.\n self._stream.start()", "def enable_sound(self):\n\t\tif self._setting.get(FIFE_MODULE, \"PlaySounds\"): # Set up sound if it is enabled\n\t\t\tself.soundclippool = self.engine.getSoundClipPool()\n\t\t\tself.emitter['bgsound'] = 
self.soundmanager.createEmitter()\n\t\t\tself.emitter['bgsound'].setGain(self._setting.get(UH_MODULE, \"VolumeMusic\"))\n\t\t\tself.emitter['bgsound'].setLooping(False)\n\t\t\tself.emitter['effects'] = self.soundmanager.createEmitter()\n\t\t\tself.emitter['effects'].setGain(self._setting.get(UH_MODULE, \"VolumeEffects\"))\n\t\t\tself.emitter['effects'].setLooping(False)\n\t\t\tself.emitter['speech'] = self.soundmanager.createEmitter()\n\t\t\tself.emitter['speech'].setGain(self._setting.get(UH_MODULE, \"VolumeEffects\"))\n\t\t\tself.emitter['speech'].setLooping(False)\n\t\t\tself.emitter['ambient'] = []\n\t\t\tself.music_rand_element = random.randint(0, len(self.menu_music) - 1)\n\t\t\tself.initial_menu_music_element = self.music_rand_element\n\n\t\t\tself.check_music() # Start background music\n\t\t\tExtScheduler().add_new_object(self.check_music, self, loops=-1)", "def takeCommand():\n\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Listening...\")\n r.pause_threshold = 1\n audio = r.listen(source)\n try:\n print(\"Recognizing... \")\n voice_input = r.recognize_google(audio, language=\"en-US\")\n print(f\"The user said: {voice_input}\\n\")\n except Exception as e:\n # print(e)\n print(\"Please say that again\")\n return \"None\"\n return voice_input", "def take_command(self):\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Listening.....\")\r\n r.pause_threshold = 1\r\n audio = r.listen(source)\r\n try:\r\n query = r.recognize_google(audio, language=\"en-in\")\r\n print(\"Recognizing.....\")\r\n print(\"Query=\", query)\r\n except Exception as e :\r\n print(e)\r\n self.speak(\"Say that again please....\")\r\n return \"None\"\r\n return query", "def start(self):\n while True:\n requests.get(\"http://localhost:8080/clear\") #clearing the screen on the web browser\n speech=\"Welcome to Smart Mirror !!\"\n requests.get(\"http://localhost:8080/statement?text=%s\" % speech) # calling the text to appear on the browser\n self.speech.synthesize_text(\"hello\"+speech) #synthesizing the text into speech\n speech1=\"Say The launch Phrase .\" #asking the user to say the lauch phrase\n self.speech.synthesize_text(speech1) #speaking of the above line,\n if self.vision.recognize_face(): #checking if\n print \"Face Found\"\t\t\t#the person is infront of camera\n if use_launch_phrase:\t\t\t#checking whether to use the launch phrase or not\n recognizer, audio = self.speech.listen_for_audio()\t\t#initializing\n if self.speech.is_call_to_action(recognizer, audio):\t#checking if the audio is recognized\n self.__acknowledge_action()\t\t\t#if it is recognized take action\n self.decide_action()\t\t\t#deciding which action to be taken\n else:\n self.decide_action()\t\t\t#printing the else part", "def speak():\n sentences = ['DESTROY ALL HU- I MEAN GREETINGS MEAT BAG',\n 'She sells sea shells by the sea shore', 'Other sentence']\n while True:\n AUDIO.speak(sentences[randint(0, 2)])\n sleep(15)", "def SelectMicrophoneDevice(self, devIndex):\n\n # Maybe a new audio device is included, so create new pyaudio instance.\n self.resetPyAudio()\n self.setUpAudioDevices()\n\n self.defaultMicrophoneIndex = devIndex\n\n if DEBUG_AUDIO_CONTROL2 and devIndex != -1:\n devinfo = self.p.get_device_info_by_index(devIndex)\n self.logger.debug(\"Audio Control2 : SelectMicrophoneDevice - Selected device %s\" % devinfo)", "def set_microphone_sample_rate_to_22khz():\n\n return _update_device_state_bit(_16khz_bit, 0)", "def append_silence(duration_milliseconds=500):\r\n num_samples = 
duration_milliseconds * (sample_rate / 1000.0)\r\n\r\n for x in range(int(num_samples)): \r\n audio.append(0.0)\r\n\r\n return", "def phone_start(self) -> None:", "def sound_callback(self, data):\n # print \"heard a loud noise!\"\n # print data.data\n sound = data.data.split(\" \")\n print sound[0]\n if float(sound[0]) > .8:\n if self.idling:\n # self.behav_pub.publish(\"greet\")\n self.ok = True\n self.control_pub.publish(\"idle stop; ed stop\")\n print \"STARTING GAME\"\n self.start_game = \"TTT\"\n # elif self.start_game != None:\n # self.ok = True\n # self.control_pub.publish(\"ed stop\")\n\n # self.behav_pub.publish(\"sleep\")\n # self.emotion_pub.publish(\"STARTLE\")", "def set_microphone_sample_rate_to_16khz():\n\n return _update_device_state_bit(_16khz_bit, 1)", "def speak(audio):\n engine.say(audio)\n engine.runAndWait()", "def speak(audio):\n engine.say(audio)\n engine.runAndWait()", "def _synthesize_tone(self, duration_in_msec):\n sample_count = int(float(self.sample_rate) * duration_in_msec * 0.001)\n # There are two bytes per 16-bit sample.\n tmp_buffer = bytearray(sample_count + sample_count)\n fscale = 2.0 * math.pi * self.tone_frequency * self.sample_period;\n # Loop and create the audio samples.\n index = 0\n # Create the rising envelope part of the tone.\n for i, gain in enumerate(self.pulse_shaping_list):\n angle = float(i) * fscale\n value = gain * math.sin(angle)\n byte0, byte1 = MorseCodeSender._float_to_16_bit_sample(value)\n # Write the bytes in little-endian order.\n tmp_buffer[index] = byte0\n tmp_buffer[index + 1] = byte1\n index += 2\n # Create the level part of the tone. Start at the next\n # sample index so that the phase is a continuous function.\n rising_falling_count = len(self.pulse_shaping_list)\n middle_sample_count = sample_count - (2 * rising_falling_count)\n for i in range(0, middle_sample_count):\n angle = float(i + rising_falling_count) * fscale\n value = math.sin(angle)\n byte0, byte1 = MorseCodeSender._float_to_16_bit_sample(value)\n # Write the bytes in little-endian order.\n tmp_buffer[index] = byte0\n tmp_buffer[index + 1] = byte1\n index += 2\n # Create the decaying part of the tone. 
Start at the next\n # sample index so that the phase is a continuous function.\n temp_count = rising_falling_count + middle_sample_count;\n for i, rev_gain in enumerate(self.pulse_shaping_list):\n angle = float(i + temp_count) * fscale\n value = (1.0 - rev_gain) * math.sin(angle)\n byte0, byte1 = MorseCodeSender._float_to_16_bit_sample(value)\n # Write the bytes in little-endian order.\n tmp_buffer[index] = byte0\n tmp_buffer[index + 1] = byte1\n index += 2\n return tmp_buffer", "def _synthesize_silence(self, duration_in_msec):\n if duration_in_msec > 0.0:\n sample_count = int(float(self.sample_rate) * duration_in_msec * 0.001);\n # There are two bytes per 16-bit sample.\n byte_count = sample_count + sample_count\n tmp_buffer = bytearray(byte_count)\n # Loop and create the audio samples.\n index = 0\n for i in range(0, byte_count):\n tmp_buffer[i] = 0\n return tmp_buffer", "def start(scale, entry, label, v):\r\n\r\n # The following variables are common across all the 5 different voices selected and so, will only be changed there for space considerations\r\n CHANNELS = 1\r\n RATE = 8000\r\n DURATION = 0\r\n WIDTH = 2\r\n BLOCKLEN = 1024\r\n\r\n if len(\r\n entry.get()) == 0: # can try and get rid of invalid characters when saving file too but that won't be necessary\r\n label['text'] = 'File name cannot be empty!'\r\n else:\r\n DURATION = scale.get()\r\n output_wavfile = entry.get()\r\n\r\n label['text'] = 'You will be recording for ' + str(DURATION) + ' seconds.'\r\n\r\n if v.get() == 1:\r\n voice1(output_wavfile, DURATION, BLOCKLEN, RATE, WIDTH, CHANNELS)\r\n print(\"1\")\r\n elif v.get() == 2:\r\n voice2(output_wavfile, DURATION, BLOCKLEN, RATE, WIDTH, CHANNELS)\r\n print(\"2\")\r\n elif v.get() == 3:\r\n voice3(output_wavfile, DURATION, BLOCKLEN, RATE, WIDTH, CHANNELS)\r\n print(\"3. 
Roger, roger!\")\r\n elif v.get() == 4:\r\n voice4(output_wavfile, DURATION, RATE, WIDTH, CHANNELS)\r\n print(\"4\")\r\n elif v.get() == 5:\r\n manualControl(output_wavfile, DURATION, BLOCKLEN, RATE, WIDTH, CHANNELS)\r\n print(\"5\")\r\n\r\n # after whatever operation we do\r\n label['text'] = 'Successfully saved ' + output_wavfile + '.wav file'\r\n\r\n pass", "def mic_audio(dur):\n\n audio,b = microphone.record_audio(dur)\n audio = np.hstack([np.frombuffer(i,np.int16) for i in audio])\n return audio", "def microstep(a=0):\n global simulator, recorder\n if simulator is None:\n print \"Program is not started\"\n return\n __record(pc(), microstep, a)\n try:\n simulator.microstep(a)\n except:\n simulation_error()\n exec_hooks(microstep)\n arrows()", "def main():\n # Initialize the pins, and set their numbering scheme\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(PRIMED_LED, GPIO.OUT, initial=GPIO.LOW)\n GPIO.setup(RECORDING_LED, GPIO.OUT, initial=GPIO.LOW)\n GPIO.setup(COMPLETE_LED, GPIO.OUT, initial=GPIO.LOW)\n # Convert the list to a channel mask that\n # can be passed as a parameter to the MCC 118 functions.\n global channel_mask\n channel_mask = chan_list_to_mask(channels)\n num_channels = len(channels)\n global options\n options = OptionFlags.EXTTRIGGER # Commands MCC118 to wait for signal on trigger input pin before recording\n trigger_mode = TriggerModes.ACTIVE_HIGH # Commands MCC118 to look for HIGH signal on trigger input pin\n\n try:\n # Select an MCC 118 HAT device to use.\n address = select_hat_device(HatIDs.MCC_118)\n global hat\n hat = mcc118(address)\n \n # Ready LED\n GPIO.output(PRIMED_LED,GPIO.HIGH)\n \n # Terminal Header\n print('\\n\\n///////////////////////////////////////////////////////////////////')\n print('\\n' + ' ' + DAQ_NAME + ' - Finite Data Acquisition with LoRa Trigger @ 50kHz \\n')\n print('///////////////////////////////////////////////////////////////////')\n\n global actual_scan_rate\n actual_scan_rate = hat.a_in_scan_actual_rate(num_channels, scan_rate)\n \n global samples_per_channel \n samples_per_channel = int(recording_length*actual_scan_rate)\n \n # Scan Information to Terminal\n print('\\n\\n*************************************')\n print('\\nSelected Parameters:')\n print(' Channels: ', end='')\n print(', '.join([str(chan) for chan in channels]))\n print(' Requested scan rate (samples/sec/channel): ', scan_rate)\n print(' Actual scan rate (samples/sec/channel): ', actual_scan_rate)\n print(' Options: ', enum_mask_to_string(OptionFlags, options))\n print(' Trigger Mode: ', trigger_mode.name)\n print(' Number of samples/channel requested: ', samples_per_channel)\n print(' Length of recording (seconds): ', samples_per_channel/actual_scan_rate)\n print(' CSV Storage location: ' + mypath)\n print(' DAQ Name: ' + DAQ_NAME)\n print(' DAQ Number: ', DAQ_NUM)\n print(' Total Number of DAQS: ', NUM_OF_DAQS)\n print(' Radio Response Delay Window (seconds): ', RESPONSE_DELAY)\n print(' Current date/time: ',datetime.strftime(datetime.now(), \"%m_%d_%Y, %H:%M:%S\"))\n print('\\n*************************************')\n\n hat.trigger_mode(trigger_mode)\n\n # Prepare MCC118 to start the scan based on above settings.\n hat.a_in_scan_start(channel_mask, samples_per_channel, scan_rate,\n options)\n try:\n # Wait for the external trigger to occur\n wait_for_trigger(hat)\n\n print('\\n (1) Scanning ... 
Press Ctrl-C to stop')\n \n # Read and save data from MCC118 as it records\n #read_and_display_data(hat, samples_per_channel, num_channels,fileName)\n \n read_and_display_data(hat, samples_per_channel, num_channels)\n\n except KeyboardInterrupt:\n # Clear the '^C' from the display.\n print(CURSOR_BACK_2, ERASE_TO_END_OF_LINE, '\\n')\n hat.a_in_scan_stop()\n GPIO.cleanup()\n quit()\n \n except (HatError, ValueError, KeyboardInterrupt) as err:\n print('\\n', err)\n GPIO.cleanup()\n hat.a_in_scan_stop()", "def _setup_sensor ( self ):\n self.spectral = Spectral ( np.array([500, 610, 780, 1580.] ),\n np.array([590, 680, 890, 1750.] ) )", "def __init__(self, phone):\r\n self.phone = phone\r\n self.currentMaximumVolume = 0 # this will be checked during volume set, if too high volume is wanted for volume control (e.g. 15 when 10 is maximum)\r", "def useSpeech(self):\n # Implements a subprocess to run the Kuri robot simultaneously with the user input loop\n proc_stdin = io.TextIOWrapper(self.proc.stdin, encoding='utf-8', line_buffering=True)\n\n while True:\n prompt = input(\"Type 's' to begin recording! (Type 'q' to quit) \").lower()\n if prompt == 'q':\n proc_stdin.write('q\\n')\n quit()\n if prompt == 's':\n txt = self.sr.getSpeech(\"Recording...\")\n print(\"Finished recording!\")\n if not txt:\n print(\"\\nCould you say that again?\")\n else:\n sentiment = self.sd.getSentiment(txt)\n proc_stdin.write(sentiment + '\\n')\n print(\"Sentiment: \" + sentiment + '\\n')", "def main():\n\n # Play start sound\n play_wave_file(\"start.wav\")\n\n # Connect to Lego Boost\n hub = connect()\n\n # If hub works, starts the main app flow\n if hub:\n speech(\n \"Olá. Eu sou a Faustina, uma robô assistente do ueivespeisse. Em que posso ajudar?\", hub, {})\n while True:\n try:\n act({\"legoAction\": \"colorGreen\"}, hub)\n\n recorded_file = audio.record()\n\n act({\"legoAction\": \"colorRed\"}, hub)\n\n wit_response = wit_client.get_response(recorded_file)\n\n if wit_response[\"_text\"]:\n print(wit_response)\n answer = get_answer(wit_response)\n\n text = add_information_to_text(\n answer) if answer else \"Desculpa, nao entendi o que voce quis dizer\"\n\n speech(text, hub, answer)\n if answer:\n act(answer, hub)\n else:\n act({\"legoAction\": \"colorYellow\"}, hub)\n print(\"No sound detected\")\n time.sleep(2)\n except Exception as exception:\n print(exception)\n\n time.sleep(2)\n hub.motor_external.stop()", "def start_alarm(self):\n self.out_power.pulse()", "def microphone_sensitivity(transferfactor: float) -> float:\n return amp2db(transferfactor/1000.)", "def __init__(self):\n super().__init__(interface.Audio, DEFAULT_PRIORITIES)", "def recognize_async_audio_stream(self, language_code = \"en-US\"): \n if language_code not in self.languages:\n print('\\\"{}\\\" is not a supported language code. 
Make sure it\\'s supported by Google and try adding adding it to the languages list.\\n'.format(language_code))\n return\n\n self.final_result_queue.queue.clear() # Clear all items in queue for new stream.\n\n config_stream = speech.StreamingRecognitionConfig(\n config = speech.RecognitionConfig(\n encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,\n sample_rate_hertz=self.microphone_handler.RATE,\n language_code=language_code,\n enable_automatic_punctuation=True,\n ),\n interim_results=True \n )\n\n self.microphone_handler.start_recording(streaming=True)\n while self.microphone_handler.streaming:\n data = self.microphone_handler.stream_generator()\n requests = (speech.StreamingRecognizeRequest(audio_content=content) for content in data)\n\n try:\n responses = self.client.streaming_recognize(config_stream, requests)\n for response in responses:\n self.final_result_queue.put(response.results[0])\n if response.results[0].is_final:\n return # Stops more recordings than one. Doesn't halt after recording is done. (temp)\n if self.debug:\n print(response.results[0].alternatives[0].transcript + '\\n') # Print all non final results in terminal(debug).\n except:\n print('Failed to get response.')", "def input_audio_icon():\n if use_espeak_synthesis:\n os.system(\"espeak \\\"Type in\\\"\")", "def _start(self, acqtime):\n self.stopped = False\n NN = 1000\n while not self.stopped:\n self.prepare(acqtime, None, NN)\n for nn in range(NN):\n if self.stopped:\n self.stop()\n break\n self.arm()\n self.start()\n while self.busy():\n time.sleep(.05)", "def retrieve_audio(self):\n peak = Peak(\n registered_at=datetime.utcnow(),\n peak_value=self.noise_peak()\n )\n peak.save()", "def takeCommand():\r\n recognizer = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Listenging...\")\r\n audio = recognizer.listen(source)\r\n\r\n try:\r\n print(\"LOADING...\")\r\n command = recognizer.recognize_google(audio, language=\"en-un\")\r\n print(f\"user said: {command}\")\r\n\r\n except Exception as e:\r\n speak(f\"Please say that again\")\r\n command = None\r\n return command", "def play_audio(self):\n if not self.voice.get_busy():\n self.voice.play(self.sound)\n else:\n pass", "def _augment_gain(audio, low=0.5, high=1.5):\n g = low + np.random.random_sample(1) * (high - low)\n return audio * g", "def _setup_sensor ( self ):\n self.spectral = Spectral ( np.array([450, 520, 630, 770., 1550, 2090.] ),\n np.array([ 520, 600, 690, 900., 1750., 2350.] 
) )", "def main():\n\n start_program()\n yes_syn_words, no_syn_words, stop_words, record, mp3_filename, text, device_index, output_file = \\\n process_parameter_set()\n stand_alone_flag = process_check_input_argument()\n process_speak_listen(device_index, mp3_filename, text, record, flag=1)\n text = process_name(device_index, mp3_filename, record)\n input_details = process_speak_listen(device_index, mp3_filename, text, record, flag=0)\n response = process_input_details(device_index, input_details, mp3_filename, record, yes_syn_words, no_syn_words,\n stop_words)\n process_output_file_write(output_file, response)\n process_delete_mp3_output_files(stand_alone_flag)\n exit_program()", "def __call__(self, waveforms, telid, selected_gain_channel):", "def setMicrostep(self, on=False):\n cmd_string = 'N{0}'.format(int(on))\n self.sendRcv(cmd_string, execute=True)\n self.microstep = on", "def speech_recognizer_function(self, text_widget):\r\n label_listening = Label(self.root, text=\"listening to input...\",\r\n font=self.text_font, bg=self.bg_color)\r\n label_listening.pack(pady=10)\r\n recognizer = speech_recognition.Recognizer()\r\n microphone = speech_recognition.Microphone()\r\n with microphone as source:\r\n recognizer.adjust_for_ambient_noise(source)\r\n audio = recognizer.listen(source)\r\n try:\r\n text = recognizer.recognize_google(audio)\r\n text += \" \"\r\n except:\r\n text = \"\"\r\n text_widget.insert(END, text)\r\n label_listening.destroy()\r\n self.thread_speech_is_running = False", "def get_audio_features(sample, audio_data, max_len, data_truncating, data_filling, audio_cfg):\n if len(audio_data) > max_len:\n if data_truncating == \"fusion\":\n # fusion\n mel = get_mel(audio_data, audio_cfg)\n # split to three parts\n chunk_frames = max_len // audio_cfg['hop_size']+1 # the +1 related to how the spectrogram is computed\n total_frames = mel.shape[0]\n if chunk_frames == total_frames:\n # there is a corner case where the audio length is\n # larger than max_len but smaller than max_len+hop_size.\n # In this case, we just use the whole audio.\n mel_fusion = np.stack([mel, mel, mel, mel], axis=0)\n longer = [[False]]\n else:\n ranges = np.array_split(list(range(0, total_frames-chunk_frames+1)), 3)\n # print('total_frames-chunk_frames:', total_frames-chunk_frames,\n # 'len(audio_data):', len(audio_data),\n # 'chunk_frames:', chunk_frames,\n # 'total_frames:', total_frames)\n if len(ranges[1]) == 0:\n # if the audio is too short, we just use the first chunk\n ranges[1] = [0]\n if len(ranges[2]) == 0:\n # if the audio is too short, we just use the first chunk\n ranges[2] = [0]\n # randomly choose index for each part\n idx_front = np.random.choice(ranges[0])\n idx_middle = np.random.choice(ranges[1])\n idx_back = np.random.choice(ranges[2])\n # select mel\n mel_chunk_front = mel[idx_front:idx_front+chunk_frames, :]\n mel_chunk_middle = mel[idx_middle:idx_middle+chunk_frames, :]\n mel_chunk_back = mel[idx_back:idx_back+chunk_frames, :]\n\n # shrink the mel\n # Output may differ between torchvision.transforms.Resize and numpy.resize.\n #mel_shrink_torch = torch.from_numpy(mel[None])\n #mel_shrink_torch = torchvision.transforms.Resize(size=[chunk_frames, 64])(mel_shrink_torch)[0]\n #mel_shrink_torch = mel_shrink_torch.to('cpu').detach().numpy().copy()\n mel_shrink_numpy = np.resize(mel[None], (chunk_frames, 64))\n # logging.info(f\"mel_shrink.shape: {mel_shrink.shape}\")\n\n # stack\n mel_fusion = np.stack([mel_chunk_front, mel_chunk_middle, mel_chunk_back, mel_shrink_numpy], axis=0)\n longer 
= [[True]]\n # random crop to max_len (for compatibility)\n overflow = len(audio_data) - max_len\n idx = np.random.randint(0, overflow + 1)\n audio_data = audio_data[idx: idx + max_len]\n\n else: # padding if too short\n if len(audio_data) < max_len: # do nothing if equal\n if data_filling == \"repeatpad\":\n n_repeat = int(max_len/len(audio_data))\n audio_data = np.tile(audio_data, n_repeat)\n # audio_data = audio_data.unsqueeze(0).unsqueeze(0).unsqueeze(0)\n # audio_data = F.interpolate(audio_data,size=max_len,mode=\"bicubic\")[0,0,0]\n audio_data = np.pad(audio_data, [(0, max_len - len(audio_data))], \"constant\")\n elif data_filling == \"pad\":\n audio_data = np.pad(audio_data, [(0, max_len - len(audio_data))], \"constant\")\n elif data_filling == \"repeat\":\n n_repeat = int(max_len/len(audio_data))\n audio_data = np.tile(audio_data, n_repeat+1)[:max_len]\n \n if data_truncating == 'fusion':\n mel = get_mel(audio_data, audio_cfg)\n mel_fusion = np.stack([mel, mel, mel, mel], axis=0)\n longer = [[False]]\n\n return longer, mel_fusion, audio_data", "def start(self):\n self.stop_recognising.clear()\n self.thread.start()", "def start(update,context):\r\n update.message.reply_text('welcome to voice bot')", "def enter_pending_start(self, count=10):\n self.app.pingWebSessions()\n\n if count == 0:\n self.app.admin.hangup(self.channel)\n\n d = self.agi.streamFile(\"weareforests-audio/welcome\", chr(self.digit))\n def audioDone(r):\n digit, offset = r\n if digit == self.digit:\n self.setStateAfterSample(\"start\", \"weareforests-audio/shortsilence\")\n else:\n self.state.set(\"pending_start\", count-1)\n d.addCallback(audioDone)\n d.addErrback(self.catchHangup)", "def listen2(self):\n with self.m as source:\n self.r.adjust_for_ambient_noise(source, duration=0.5)\n audio = self.r.listen(source)\n flac_data = audio.get_flac_data(\n convert_rate=None if audio.sample_rate >= 16000 else 16000,\n # audio samples should be at least 16 kHz\n convert_width=None if audio.sample_width >= 2 else 2 # audio samples should be at least 16-bit\n )\n\n try:\n print(json.dumps(self.speech_to_text.recognize(flac_data, content_type='audio/flac',\n customization_id=\"3a2e04c0-5346-11e7-aeaf-57afcb850a3a\",\n model=None), indent=4))\n except sr.UnknownValueError:\n print(str(-1))\n except sr.RequestError:\n print(str(404))", "def start_transcribing():\n transcribe.main()", "def recorder():\n # Following block gets rid of annoying config errors by ALSA\n def py_error_handler(filename, line, function, err, fmt):\n pass\n ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p)\n c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)\n asound = cdll.LoadLibrary('libasound.so')\n asound.snd_lib_error_set_handler(c_error_handler) \n\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n yield stream\n\n stream.stop_stream()\n stream.close()\n p.terminate()", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > 30:\n 
break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return sample_width, r", "def create_play_audio_thread(self):\n self.sound_thread = threading.Thread(target=self.play_audio)", "def voiceSearch(self):\n\n # Set the device ID of the mic that we\n # specifically want to use to avoid ambiguity\n for i, microphone_name in enumerate(\n sr.Microphone.list_microphone_names()):\n if(microphone_name == MIC_NAME):\n device_id = i\n break\n\n # obtain audio from the microphone\n r = sr.Recognizer()\n with sr.Microphone(device_index=device_id) as source:\n # clear console of errors\n subprocess.run(\"clear\")\n\n # wait for a second to let the recognizer adjust the\n # energy threshold based on the surrounding noise level\n r.adjust_for_ambient_noise(source)\n\n print(\"Say something to search for books: \")\n try:\n audio = r.listen(source, timeout=1.5)\n except sr.WaitTimeoutError:\n return None\n\n # recognize speech using Google Speech Recognition\n translation = None\n try:\n translation = r.recognize_google(audio)\n except(sr.UnknownValueError, sr.RequestError):\n pass\n finally:\n return translation", "def main():\n\n # Parse arguments\n parser = OptionParser()\n parser.add_option('-n', '--subscription_key', dest='subscription_key',\n help='subscription_key for authentication')\n parser.add_option('-t', '--text', dest='text',\n help='text to synthesize')\n parser.add_option('-l', '--language', dest='language',\n help='language')\n parser.add_option('-g', '--gender', dest='gender',\n help='gender')\n parser.add_option('-d', '--directory', dest='directory',\n help='directory to store the file')\n (options, args) = parser.parse_args()\n subscription_key = options.subscription_key\n text = options.text\n language = options.language\n gender = options.gender\n directory = options.directory\n\n # Perform sanity checks on options\n validate_options(subscription_key, text)\n\n if not directory:\n directory = default_directory\n\n if not language:\n language = default_language\n\n if not gender:\n gender = default_gender\n\n # format = 'riff-16khz-16bit-mono-pcm'\n format = 'riff-8khz-8bit-mono-mulaw'\n\n # lang = 'en-AU'\n # gender = 'Female'\n tts_msspeak = MSSpeak(subscription_key, '/tmp/')\n tts_msspeak.set_cache(False)\n output_filename = tts_msspeak.speak(text, language, gender, format)\n\n print 'Recorded TTS to %s%s' % (directory, output_filename)", "def audio(self):\n self.log_string += 'Audio file'\n self._media_processing()", "def main():\n first_notes_and_song()\n bad_singing()\n using_a_sensor_to_block()", "def hear_answer(tts, speech_recognition, memory, cur_time):\n speech_recognition.setVocabulary(numbers, False)\n tts.say(\"\")\n answer = \"\"\n memory.subscribeToEvent(\"TouchChanged\",\n \"ReactToTouch\",\n \"onTouched\")\n while answer == \"\":\n if touched:\n speech_recognition.subscribe(\"GET_ANSWER\")\n print('Speech recognition engine started')\n speech_recognition.pause(False)\n time.sleep(3.0)\n speech_recognition.pause(True)\n answer = memory.getData(\"WordRecognized\")\n print(\"data: %s\" % answer)\n # Confidence must be bigger than 0.5 in order to continue\n if answer[1] < 0.45:\n answer = \"\"\n else:\n answer = str(answer[0])\n speech_recognition.unsubscribe(\"GET_ANSWER\")\n if answer == \"\":\n no_answer(tts, randint(0, 3))\n set_touched(False)\n elif not warned and datetime.datetime.now() > (cur_time + datetime.timedelta(minutes=3)):\n global warned\n 
warned = True\n tts.say(\"Je werkt nu 3 minuten aan deze som. Fouten maken mag. Het is niet erg als je het antwoord niet weet. Zeg maar gewoon wat je denkt.\")\n memory.unsubscribeToEvent(\"TouchChanged\",\n \"ReactToTouch\")\n global warned\n warned = False\n return answer", "def __init__(self, power, FWHM_ps, center_wavelength_nm,\n time_window_ps = 10., frep_MHz = 100., NPTS = 2**10, \n GDD = 0, TOD = 0, chirp2 = 0, chirp3 = 0,\n power_is_avg = False):\n Pulse.__init__(self, frep_MHz = frep_MHz, n = NPTS)\n # make sure we weren't passed mks units \n assert (center_wavelength_nm > 1.0) \n assert (time_window_ps > 1.0 ) \n self.set_center_wavelength_nm(center_wavelength_nm) \n self.set_time_window_ps(time_window_ps)\n\n T0_ps = FWHM_ps/3.7909885\n ### Generate pulse\n if not power_is_avg:\n # numpy.sinc is sin(pi*x)/(pi*x), so we divide by pi\n self.set_AT( np.sqrt(power) * np.sinc(self.T_ps/(T0_ps*np.pi)) ) \n else:\n self.set_AT( 1 / np.sinc(np.pi * self.T_ps/(T0_ps*np.pi)) )\n self.set_AT(self.AT * np.sqrt( power / ( frep_MHz*1.0e6 * self.calc_epp()) ))\n \n self.chirp_pulse_W(GDD, TOD)\n self.chirp_pulse_T(chirp2, chirp3, T0_ps)", "def initialize(self):\n self.initilize_multiply_array() # m\n self.initialize_cameras()\n self.initialize_electronics()\n self.logger.info('Starting free runs and continuous reads')\n self.camera_microscope.start_free_run()\n self.camera_microscope.continuous_reads()\n self.camera_fiber.start_free_run()\n self.camera_fiber.continuous_reads()\n self.servo_off()\n\n time.sleep(1) #m Without the sleep below initialize_multiply_array does not work", "def prepare_recording(self, example):\n audio = load_audio(example[self.audio_key], dtype=np.float32)\n assert audio.shape[0] >= 2\n if audio.shape[0] > 2:\n if 'mic_pair' in example.keys():\n mic_pair = example['mic_pair']\n audio = audio[np.asarray(mic_pair)]\n else:\n audio = audio[np.asarray(self.mic_pair)]\n recording_len = audio.shape[-1]\n start_offset = 0\n end = recording_len\n if \"offset\" in example.keys() and \"onset\" in example.keys():\n end = example['offset']\n start_offset = example['onset']\n if end <= self.signal_length + start_offset:\n # current recording has not the sufficient length for the specified\n # desired signal length\n # example is skipped later during prefetch or catch\n raise FilterException\n if not self.random_speech_samples:\n slice_start = str_to_random_generator(\n example[\"example_id\"]).integers(\n start_offset, end - self.signal_length)\n else:\n slice_start = np.random.randint(\n start_offset, recording_len - self.signal_length)\n prepared_audio = audio[:, slice_start:slice_start + self.signal_length]\n return prepared_audio", "def convert2mel(audio,base_path,fs, n_fft,fmax,n_mels,hop_length_samples, window_lenght,type_training):\n\n path = os.path.join(base_path, audio)\n if type_training != \"train\":\n if os.path.isfile(os.path.join(base_path,\"processed_wavs_train\",audio)):\n data,_ = librosa.core.load(os.path.join(base_path,\"processed_wavs_train\",audio), sr=fs, res_type=\"kaiser_best\")\n else:\n data,_ = librosa.core.load(os.path.join(base_path,\"processed_wavs_test\",audio), sr=fs, res_type=\"kaiser_best\")\n else:\n data, _ = librosa.core.load(path, sr=fs, res_type=\"kaiser_best\")\n data = normalize_amplitude(data)\n\n powSpectrum = np.abs(stft(data+ 0.00001,n_fft,hop_length = hop_length_samples, win_length = window_lenght, window = windowing(window_lenght, sym=False), center=True, pad_mode='reflect'))**2\n\n mels = melspectrogram(y= None,n_fft=n_fft ,sr=fs 
,S= powSpectrum, hop_length= hop_length_samples ,n_mels=n_mels,fmax=fmax , fmin = 0.0).T\n mels = librosa.core.power_to_db(mels, ref=np.min(mels))\n mels = mels / np.max(mels)\n\n return mels.T", "def setMicInputGain(self, channel, gain, unitCode=0):\n resp = self.XAPCommand('MLINE', channel, gain, unitCode=unitCode)\n return int(resp)", "def start(self):\n self._call = LoopingCall.withCount(self._update)\n self._call.clock = reactor\n self._call.start(1.0 / self.framerate, now=False)\n self._running = True", "def stream_inference_of_microphone_audio(args):\n with sd.InputStream(device=args.device, channels=1, callback=update_spectrogram,\n blocksize=samples_buffer_block_size,\n samplerate=samplerate):\n with tf.Session() as sess:\n tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], args.model_dir)\n predictor = tf.contrib.predictor.from_saved_model(args.model_dir)\n\n network_spec_w = args.model_input_width\n spectrogram_predictions = np.zeros((spec_buffer_w + spec_buffer_pad, 3))\n spectrogram_predictions_ma = np.zeros((spec_buffer_w + spec_buffer_pad, 3))\n \n # display buffer that can be overwritten with every new display\n display_predictions = np.stack([np.arange(spec_buffer_w), np.zeros(spec_buffer_w)]).astype(int).T\n frame = np.zeros((spec_buffer_h, spec_buffer_w, 3), dtype=np.uint8)\n\n alpha = 0.025\n N = 90\n myfilt = alpha*((1-alpha)**np.arange(0,N))\n myfilt /= myfilt[:60].sum()\n\n last_pred_write = 0\n\n perf = MovingWindowPerf()\n while True:\n # sleep(0.01) # restrict max fps to 100\n imageify = spec_buffer[:,spec_buffer_pad:].copy()\n imageify = (imageify - imageify.min()) / (1e-5 + imageify.max() - imageify.min())\n imageify = (imageify * 255).astype(np.uint8)\n frame[:,:,0] = imageify\n frame[:,:,1] = imageify\n frame[:,:,2] = imageify\n\n idx_now = spec_buffer_p % spec_buffer_w\n # we look into the past\n se = idx_now + spec_buffer_pad\n ss = se - network_spec_w\n\n next_input = np.expand_dims(spec_buffer[:, ss:se], 0)\n\n prediction = predictor({\"spectrograms\": next_input })['softmax']\n perf.tick()\n prediction = prediction[0] # batch size of one\n \n spectrogram_predictions[last_pred_write:se,:] = prediction[-1,:] # write latest prediction\n latest_ma = spectrogram_predictions[(se-ma_width):se,2].mean()\n spectrogram_predictions_ma[last_pred_write:se,:] = latest_ma # write the latest moving average\n last_pred_write = se\n pred_class = np.argmax(prediction[-1,:])\n \n # erase the future\n spectrogram_predictions[se+1:] = 0\n spectrogram_predictions_ma[se+1:] = 0\n\n # play a bell on WW detection\n if latest_ma >= args.detection_threshold:\n bell.play(device=args.device)\n\n ### display code\n white = (255,255,255)\n blue = (255,0,0)\n red = (0,0,255)\n green = (0,255,0)\n colors = [green, blue, red]\n activities = ['voice', 'silence', 'alexa']\n\n for i, color in enumerate(colors):\n display_predictions[:,1] = (spec_buffer_h - (spectrogram_predictions[spec_buffer_pad:, i] * spec_buffer_h)).astype(int)\n cv2.polylines(frame, [display_predictions], isClosed=False, color=color)\n\n # display moving average\n display_predictions[:,1] = (spec_buffer_h - (spectrogram_predictions_ma[spec_buffer_pad:, i] * spec_buffer_h)).astype(int)\n cv2.polylines(frame, [display_predictions], isClosed=False, color=white) \n\n cv2.line(frame, (idx_now, 0), (idx_now, spec_buffer_h), green, 2) # moving vertical line\n thresh_display_height = spec_buffer_h - int(args.detection_threshold * spec_buffer_h)\n cv2.line(frame, (0, thresh_display_height), (spec_buffer_w, 
thresh_display_height), white, 2) # horizontal line\n \n __draw_label(frame, activities[pred_class], colors[pred_class], (spec_buffer_w//2, 0))\n __draw_label(frame, perf.fps_str('inferences/sec'), green)\n\n cv2.imshow(\"Press 'q' to quit\", frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > SILENCE:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return sample_width, r", "def start(self):\n\tglobal mode\n\tmode=\"./music/\"\n\tglobal message\n\tif message!=2:\n\t\tmessage=1\n\t\tbot.loop.create_task(play())" ]
[ "0.68419266", "0.64319956", "0.62581193", "0.6240901", "0.6077823", "0.6044454", "0.6029799", "0.5960143", "0.5955317", "0.59541893", "0.5915512", "0.5875549", "0.57656145", "0.56401867", "0.5621901", "0.56032926", "0.55882573", "0.5567367", "0.5551472", "0.55207324", "0.551124", "0.5480386", "0.54777855", "0.5470307", "0.54405254", "0.54348415", "0.5425399", "0.5423134", "0.5421977", "0.5407534", "0.5404506", "0.5401366", "0.5390827", "0.5389789", "0.53881097", "0.5376065", "0.53632975", "0.5360599", "0.5345099", "0.5341677", "0.5336457", "0.53355104", "0.5331967", "0.53231996", "0.53078246", "0.5303938", "0.5302872", "0.52853155", "0.528363", "0.52770764", "0.52669585", "0.52669585", "0.52631897", "0.5237934", "0.5233343", "0.5226056", "0.5223017", "0.5208677", "0.5208601", "0.5207957", "0.5205293", "0.5202543", "0.519055", "0.5190117", "0.5185623", "0.5175315", "0.5155267", "0.5148948", "0.5136718", "0.51218396", "0.5117624", "0.5116834", "0.5111784", "0.51089066", "0.5108054", "0.50906044", "0.50891817", "0.50824463", "0.5081222", "0.5070098", "0.5066652", "0.5062928", "0.50625736", "0.5056142", "0.50358826", "0.5033594", "0.5025552", "0.50134295", "0.5012958", "0.5009002", "0.50081897", "0.50048715", "0.5004285", "0.5003669", "0.50016755", "0.4990461", "0.4988123", "0.49859816", "0.498219", "0.49810827" ]
0.59551054
9
This function will start the microphone, listen for an answer to be provided, and extract the answer from it.
def listen_and_predict(self, online = False, key=None, verbose=False):
    with sr.Microphone(sample_rate=48000) as source:
        audio = self.recorder.listen(source)

    # recognize speech using Sphinx
    print('predicting text')
    try:
        if online:
            text = self.recorder.recognize_google(audio, key=key)
        else:
            text = self.recorder.recognize_sphinx(audio)
    except sr.UnknownValueError:
        return 'Model could not understand the audio'
    except sr.RequestError as e:
        return 'Error when predicting the text'

    text_split = text.split()
    # acceptable responses for yes and no
    acceptable_yes = set(['yes', 'yet'])
    acceptable_no = set(['no', 'know'])
    if verbose:
        print(f'Model predicted: {text}')
    if bool(set(text_split).intersection(acceptable_yes)) and bool(set(text_split).intersection(acceptable_no)):
        return 'unknown'
    elif bool(set(text_split).intersection(acceptable_yes)):
        return 'yes'
    elif bool(set(text_split).intersection(acceptable_no)):
        return 'no'
    return 'unknown'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mic_input():\n try:\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print('Say something...')\n r.pause_threshold = 1\n r.adjust_for_ambient_noise(source, duration=1)\n audio = r.listen(source)\n try:\n command = r.recognize_google(audio).lower()\n print('You said: ' + command + '\\n')\n except sr.UnknownValueError:\n print('....')\n command = self.mic_input()\n return command\n except Exception as e:\n print(e)\n return False", "def takeCommand():\n r = sr.Recognizer()\n with sr.Microphone() as source: #don't forget the () after microphone\n print(\"Listening ...\")\n r.pause_threshold = 1\n audio = r.listen(source)\n\n try:\n print(\"Recognizing..\")\n query = r.recognize_google(audio, language='en-in')\n print(f\"User said: {query}\\n\")\n\n except Exception as e:\n print(e)\n print(\"Say that again please..\")\n return \"None\"\n return query", "def takecommand():\r\n\r\n r=sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"listening....\")\r\n r.pause_threshold=1\r\n \"\"\" Pause_threshold will let you to speak with your own pace\"\"\"\r\n\r\n #r.energy_threshold=500\r\n \"\"\" energy threshold will stop hindrens from outside\"\"\"\r\n\r\n audio=r.listen(source)\r\n\r\n try:\r\n print(\"In process of recognizing..\")\r\n query=r.recognize_google(audio,language=\"en-in\")\r\n \"\"\" query will take date that has been spoken by user with the help of google API\"\"\"\r\n print(\"you said :\",query)\r\n\r\n except Exception as e:\r\n print(\"can you speak this again\")\r\n return \"none\"\r\n return query", "def hear_speech():\n print('Please speak and wait...')\n while (1):\n try:\n with sr.Microphone() as source2:\n # print('Please wait while we adjust the surrounding noise.')\n r.adjust_for_ambient_noise(source2, duration=0.2)\n # listens for the user's input\n audio2 = r.listen(source2)\n data = r.recognize_google(audio2)\n\n except sr.UnknownValueError:\n data = 0\n if data != 0:\n print('Recognizing...')\n return data", "def take_command(self):\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Listening.....\")\r\n r.pause_threshold = 1\r\n audio = r.listen(source)\r\n try:\r\n query = r.recognize_google(audio, language=\"en-in\")\r\n print(\"Recognizing.....\")\r\n print(\"Query=\", query)\r\n except Exception as e :\r\n print(e)\r\n self.speak(\"Say that again please....\")\r\n return \"None\"\r\n return query", "def takecommand():\n r = src.Recognizer()\n with src.Microphone() as source:\n print(\"Listening...\")\n r.pause_threshold = 1\n audio = r.listen(source)\n\n try:\n print(\"Recognizing...\")\n query = r.recognize_google(audio, language='en-in')\n print(f\"user said: {query}\")\n\n except Exception as e:\n speak(\"Sorry, Can You repeat this please\")\n query = None\n return query\n return query", "def takeCommand():\n\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Listening...\")\n r.pause_threshold = 1\n audio = r.listen(source)\n try:\n print(\"Recognizing... 
\")\n voice_input = r.recognize_google(audio, language=\"en-US\")\n print(f\"The user said: {voice_input}\\n\")\n except Exception as e:\n # print(e)\n print(\"Please say that again\")\n return \"None\"\n return voice_input", "def takeCommand():\r\n r=sr.Recognizer()\r\n\r\n with sr.Microphone() as source:\r\n print(\"Listening....\")\r\n r.pause_threshold = 1 #pause threshold is if we pause in between speaking it shouldnt consider the sentence as complete\r\n audio = r.listen(source)\r\n\r\n try:\r\n print(\"Recognizing...\")\r\n query= r.recognize_google(audio,language='en-in')\r\n print(f\"User said: {query} \\n\")\r\n\r\n except Exception as e:\r\n print(e)\r\n print(\"Please say that again...\")\r\n return \"None\"\r\n\r\n\r\n return query", "def speech_recognize_from_microphone():\n speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)\n speech_config.request_word_level_timestamps()\n speech_config.output_format = speechsdk.OutputFormat(1)\n\n speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config)\n\n done = False\n\n def stop_cb(evt):\n \"\"\"callback that signals to stop continuous recognition upon receiving an event `evt`\"\"\"\n print('CLOSING on {}'.format(evt))\n nonlocal done\n done = True\n\n def recognized_cb(evt):\n \"\"\"callback for recognized event\"\"\"\n if evt.result.reason == speechsdk.ResultReason.RecognizedSpeech:\n #print('RECOGNIZED: {}'.format(evt.result.text))\n #print('All params: {}'.format(evt.result))\n #print(evt.result.json)\n response = json.loads(evt.result.json)\n #print('All params: {}'.format(response))\n Text = response[\"DisplayText\"]\n duration = 0;\n for word in response[\"NBest\"][0][\"Words\"]:\n duration += word[\"Duration\"]\n duration = duration / 10000000\n print(\"dur :\"+str(duration)+\" text: \" + Text)\n\n # Connect callbacks to the events fired by the speech recognizer\n speech_recognizer.recognized.connect(recognized_cb)\n speech_recognizer.session_started.connect(lambda evt: print('SESSION STARTED: {}'.format(evt)))\n speech_recognizer.session_stopped.connect(lambda evt: print('SESSION STOPPED {}'.format(evt)))\n speech_recognizer.canceled.connect(lambda evt: print('CANCELED {}'.format(evt)))\n # stop continuous recognition on either session stopped or canceled events\n speech_recognizer.session_stopped.connect(stop_cb)\n speech_recognizer.canceled.connect(stop_cb)\n\n # Start keyword recognition\n speech_recognizer.start_continuous_recognition()\n\n while not done:\n time.sleep(.5)\n\n speech_recognizer.stop_continuous_recognition()", "def m() -> str:\n r = sr.Recognizer()\n with sr.Microphone() as source:\n audio = r.adjust_for_ambient_noise(source)\n logger.info(\"Microphone Active! 
Waiting for prompt!\")\n audio = r.listen(source)\n\n s = r.recognize_google(audio) #Send the audio to google\n result = s.lower()\n return result", "def start(self):\n self.kb_client.subscribe(self.kb_ID, {\"_data\": {\"tag\": TAG_ANSWER, \"text\": \"$input\", \"timestamp\": \"$time\", \"language\": \"$lang\"}}, self.add_emotion) # from the 'gnlp' module", "def takeCommand():\r\n recognizer = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Listenging...\")\r\n audio = recognizer.listen(source)\r\n\r\n try:\r\n print(\"LOADING...\")\r\n command = recognizer.recognize_google(audio, language=\"en-un\")\r\n print(f\"user said: {command}\")\r\n\r\n except Exception as e:\r\n speak(f\"Please say that again\")\r\n command = None\r\n return command", "def init_speech():\n program = True\n while program is True:\n print('Listening...')\n with sr.Microphone() as source:\n audio = r.listen(source)\n\n try:\n command = r.recognize_google(audio)\n print(command)\n except:\n continue\n\n if command in ['quit', 'exit', 'exits', 'exxat', 'bye', 'by' 'good-by', 'goodbye']:\n program = False\n play_audio('./audio/sentnc16.wav')\n break\n\n cmmd.discover(command)", "def get_user_speech_input(self):\n\t\twith sr.Microphone() as source:\n\t\t\tprint \"You can speak!\"\n\t\t\taudio = self.recog.listen(source, 5)\n\t\t\t\n\t\t#WIT_AI_KEY = \"4KKA5EH6VFWPMWYZTSFHNJJZYCZHGTAQ\"\n\t\tprint \"sending it\"\n\t\ttry:\n\t\t\tprint \"Google thinks: \" + self.recog.recognize_google(audio)\n\t\texcept sr.UnknownValueError:\n\t\t\tprint(\"Google Speech Recognition could not understand audio\")\n\t\texcept sr.RequestError as e:\n\t\t\tprint(\"Could not request results from Google Speech Recognition service; {0}\".format(e))", "def hear_answer(tts, speech_recognition, memory, cur_time):\n speech_recognition.setVocabulary(numbers, False)\n tts.say(\"\")\n answer = \"\"\n memory.subscribeToEvent(\"TouchChanged\",\n \"ReactToTouch\",\n \"onTouched\")\n while answer == \"\":\n if touched:\n speech_recognition.subscribe(\"GET_ANSWER\")\n print('Speech recognition engine started')\n speech_recognition.pause(False)\n time.sleep(3.0)\n speech_recognition.pause(True)\n answer = memory.getData(\"WordRecognized\")\n print(\"data: %s\" % answer)\n # Confidence must be bigger than 0.5 in order to continue\n if answer[1] < 0.45:\n answer = \"\"\n else:\n answer = str(answer[0])\n speech_recognition.unsubscribe(\"GET_ANSWER\")\n if answer == \"\":\n no_answer(tts, randint(0, 3))\n set_touched(False)\n elif not warned and datetime.datetime.now() > (cur_time + datetime.timedelta(minutes=3)):\n global warned\n warned = True\n tts.say(\"Je werkt nu 3 minuten aan deze som. Fouten maken mag. Het is niet erg als je het antwoord niet weet. 
Zeg maar gewoon wat je denkt.\")\n memory.unsubscribeToEvent(\"TouchChanged\",\n \"ReactToTouch\")\n global warned\n warned = False\n return answer", "def process_speak_listen(device_index, mp3_filename, text, record, flag):\n\n mp3_filename = mp3_filename + \".mp3\"\n try:\n tts = gTTS(text=text, lang='en', slow=False)\n tts.save(mp3_filename)\n playsound(mp3_filename)\n os.remove(mp3_filename)\n\n if flag != 1:\n with sr.Microphone(device_index=device_index) as source:\n record.adjust_for_ambient_noise(source, duration=1)\n print(\"Speak:\")\n os.system(\"zenity --progress --width=400 --height=200 --title='Speak Now' \"\n \"--text='Speak Now......No need to click OK button' --no-cancel &\")\n try:\n audio = record.listen(source, timeout=5)\n text = record.recognize_google(audio)\n os.system(\"ps -ef|grep zenity|awk '{print $2}'|head -1|xargs kill -9\")\n print(text)\n except LookupError:\n os.system(\"ps -ef|grep zenity|awk '{print $2}'|head -1|xargs kill -9\")\n print(\"ERROR : LookupError - Could not able to understand\")\n text = None\n except speech_recognition.WaitTimeoutError:\n os.system(\"ps -ef|grep zenity|awk '{print $2}'|head -1|xargs kill -9\")\n print(\"ERROR : WaitTimeoutError - Could not able to listen anything for 5 seconds\")\n text = None\n except speech_recognition.UnknownValueError:\n os.system(\"ps -ef|grep zenity|awk '{print $2}'|head -1|xargs kill -9\")\n print(\"ERROR : UnknownValueError - Could not able to listen anything for 5 seconds\")\n text = None\n except gtts.tts.gTTSError:\n print(\"ERROR : Connection Error : No internet connection.\")\n exit_program()\n except PermissionError:\n print(\"ERROR : No permission\")\n exit_program()\n\n return text", "def listen(self):\n with self.m as source:\n self.r.adjust_for_ambient_noise(source, duration=0.5)\n audio = self.r.listen(source)\n\n try:\n return self.r.recognize_ibm(audio, username=self.IBM_USERNAME,\n password=self.IBM_PASSWORD, show_all=False)\n except sr.UnknownValueError:\n return str(-1)\n except sr.RequestError:\n return str(404)", "def run(self) -> None:\n self.microphone.start()\n try:\n self._run()\n finally:\n self.microphone.stop()", "def handle_mic_listen(_):\n loop.responsive_recognizer.trigger_listen()", "def start_record_microphone(self):\n if not os.path.exists(self.audio_file_folder):\n os.makedirs(self.audio_file_folder)\n\n self.microphone_handler.start_recording()\n self.current_session.put(self.microphone_handler.current_session)", "def _recognise_speech() -> None:\n recogniser: Recogniser = SpeechRecogniser(\n JackRobot(\n SpeechEngine(\n )\n )\n )\n\n while True:\n recogniser.run()", "def record_audio():\n voiceObj = voice_rec()\n text = voiceObj.start() \n return text", "def voice_recognizer():\n while dr.ttsIsSpeaking().result or dr.mediaIsPlaying().result:\n time.sleep(1)\n return dr.recognizeSpeech().result", "def start( self ):\n\t\treturn self.agi.answer().addCallbacks( self.onAnswered, self.answerFailure )", "def useSpeech(self):\n # Implements a subprocess to run the Kuri robot simultaneously with the user input loop\n proc_stdin = io.TextIOWrapper(self.proc.stdin, encoding='utf-8', line_buffering=True)\n\n while True:\n prompt = input(\"Type 's' to begin recording! 
(Type 'q' to quit) \").lower()\n if prompt == 'q':\n proc_stdin.write('q\\n')\n quit()\n if prompt == 's':\n txt = self.sr.getSpeech(\"Recording...\")\n print(\"Finished recording!\")\n if not txt:\n print(\"\\nCould you say that again?\")\n else:\n sentiment = self.sd.getSentiment(txt)\n proc_stdin.write(sentiment + '\\n')\n print(\"Sentiment: \" + sentiment + '\\n')", "def ask(self, question):\n\n\t\t# If you're just trying to test voice detection, you can uncomment\n\t\t# the following 5 lines. Bobby will guess \"yellow flashlight\" and will prompt\n\t\t# you to correct him by saying \"blue flashlight\"\n\n\t\t# fake_answers = [\"no\", \"yes\", \"yes\", \"yes\", \"no\", \"yes\", \"yes\"]\n\t\t# global count\n\t\t# count += 1\n\t\t# print question\n\t\t# return fake_answers[count - 1]\n\n\t\t# self.say(question)\n\t\t# #starts listening for an answer\n\t\t# self.asr.subscribe(\"TEST_ASR\")\n\t\t# data = (None, 0)\n\t\t# while not data[0]:\n\t\t# \tdata = self.mem.getData(\"WordRecognized\")\n\t\t# #stops listening after he hears yes or no\n\t\t# self.asr.unsubscribe(\"TEST_ASR\")\n\t\t#\n\t\t# print data\n\t\t#\n\t\t# for word in self.yes_no_vocab:\n\t\t# \tfor syn in self.yes_no_vocab[word]:\n\t\t# \t\tif data[0] == syn:\n\t\t# \t\t\treturn word", "def handle_audio_input(message):\n def build_context(msg: Message):\n ctx = {'client_name': 'mycroft_listener',\n 'source': msg.context.get(\"source\" or \"speech_api\"),\n 'destination': [\"skills\"],\n \"audio_parser_data\": msg.context.get(\"audio_parser_data\"),\n \"client\": msg.context.get(\"client\"), # origin (local, klat, nano, mobile, api)\n \"neon_should_respond\": msg.context.get(\"neon_should_respond\"),\n \"username\": msg.context.get(\"username\"),\n \"timing\": {\"start\": msg.data.get(\"time\"),\n \"transcribed\": time.time()},\n \"ident\": msg.context.get(\"ident\", time.time())\n }\n if msg.context.get(\"klat_data\"):\n ctx[\"klat_data\"] = msg.context(\"klat_data\")\n ctx[\"nick_profiles\"] = msg.context.get(\"nick_profiles\")\n return ctx\n\n ident = message.context.get(\"ident\") or \"neon.audio_input.response\"\n wav_file_path = message.data.get(\"audio_file\")\n lang = message.data.get(\"lang\")\n try:\n _, parser_data, transcriptions = _get_stt_from_file(wav_file_path, lang)\n message.context[\"audio_parser_data\"] = parser_data\n context = build_context(message)\n data = {\n \"utterances\": transcriptions,\n \"lang\": message.data.get(\"lang\", \"en-us\")\n }\n handled = _emit_utterance_to_skills(Message('recognizer_loop:utterance', data, context))\n bus.emit(message.reply(ident, data={\"parser_data\": parser_data,\n \"transcripts\": transcriptions,\n \"skills_recv\": handled}))\n except Exception as e:\n LOG.error(e)\n bus.emit(message.reply(ident, data={\"error\": repr(e)}))", "def request_endpoint(audio, speech_config, output_directory, lexical):\n audio_config = speechsdk.audio.AudioConfig(filename = audio)\n speech_recognizer = speechsdk.SpeechRecognizer(speech_config = speech_config, audio_config = audio_config)\n result = speech_recognizer.recognize_once()\n filename = audio[audio.rindex('\\\\')+1:]\n text = process_recognition(result, filename, output_directory, lexical)\n return text, filename", "def StartMicrophone(self):\n if not os.path.exists('static'):\n os.mkdir('static')\n microphone = olpc.Microphone('static/sound.ogg')\n microphone.StartMicrophone()", "def start(self):\n while True:\n #requests.get(\"http://localhost:8080/clear\")\n if use_launch_phrase:\n recognizer, audio = 
self.speech.listen_for_audio()\n if self.speech.is_call_to_action(recognizer, audio):\n self.__acknowledge_action()\n self.decide_action()\n else:\n self.decide_action()", "def speech_recognizer_function(self, text_widget):\r\n label_listening = Label(self.root, text=\"listening to input...\",\r\n font=self.text_font, bg=self.bg_color)\r\n label_listening.pack(pady=10)\r\n recognizer = speech_recognition.Recognizer()\r\n microphone = speech_recognition.Microphone()\r\n with microphone as source:\r\n recognizer.adjust_for_ambient_noise(source)\r\n audio = recognizer.listen(source)\r\n try:\r\n text = recognizer.recognize_google(audio)\r\n text += \" \"\r\n except:\r\n text = \"\"\r\n text_widget.insert(END, text)\r\n label_listening.destroy()\r\n self.thread_speech_is_running = False", "def handle(text, mic, profile):\n #it heard the word Music\n mic.say(\"You said \" + text)\n\n #contact the hub requesting a file (NAMED songoptions.txt that overwrites) containg 3 random songs and numbers on the same line\n #hubmusic.getoptions()\n\n #for line in file, read out the line which will be (1 jayz - brush your shoulders off ....) \n with open(\"songoptions.txt\", \"r\") as searchfile:\n for line in searchfile:\n mic.say(line.strip())\n\n #listen for user input\n #if user chooses a valid number, send that number to the HUB and the HUB will send over that song\n #play the song\n\n #probably import hubmusic and in there function playsong. \n #rasp.toggleLamp(veraIP, text.lower())", "def main():\n\n start_program()\n yes_syn_words, no_syn_words, stop_words, record, mp3_filename, text, device_index, output_file = \\\n process_parameter_set()\n stand_alone_flag = process_check_input_argument()\n process_speak_listen(device_index, mp3_filename, text, record, flag=1)\n text = process_name(device_index, mp3_filename, record)\n input_details = process_speak_listen(device_index, mp3_filename, text, record, flag=0)\n response = process_input_details(device_index, input_details, mp3_filename, record, yes_syn_words, no_syn_words,\n stop_words)\n process_output_file_write(output_file, response)\n process_delete_mp3_output_files(stand_alone_flag)\n exit_program()", "def listen2(self):\n with self.m as source:\n self.r.adjust_for_ambient_noise(source, duration=0.5)\n audio = self.r.listen(source)\n flac_data = audio.get_flac_data(\n convert_rate=None if audio.sample_rate >= 16000 else 16000,\n # audio samples should be at least 16 kHz\n convert_width=None if audio.sample_width >= 2 else 2 # audio samples should be at least 16-bit\n )\n\n try:\n print(json.dumps(self.speech_to_text.recognize(flac_data, content_type='audio/flac',\n customization_id=\"3a2e04c0-5346-11e7-aeaf-57afcb850a3a\",\n model=None), indent=4))\n except sr.UnknownValueError:\n print(str(-1))\n except sr.RequestError:\n print(str(404))", "def speak(audio):\n engine.say(audio)\n engine.runAndWait()", "def speak(audio):\n engine.say(audio)\n engine.runAndWait()", "def recognize_speech_from_mic(recognizer, microphone):\n # check that recognizer and microphone arguments are appropriate type \n if not isinstance(recognizer, sr.Recognizer):\n raise TypeError(\"`recognizer` must be `Recognizer` instance\")\n\n if not isinstance(microphone, sr.Microphone):\n raise TypeError(\"`microphone` must be `Microphone` instance\")\n\n # adjust the recognizer sensitivity to ambient noise and record audio \n # from the microphone \n with microphone as source:\n recognizer.adjust_for_ambient_noise(source)\n audio = recognizer.listen(source)\n\n # set up the response 
object \n response = {\n \"success\": True,\n \"error\": None,\n \"transcription\": None\n }\n\n # try recognizing the speech in the recording \n # if a RequestError or UnknownValueError exception is caught, \n # update the response object accordingly \n try:\n response[\"transcription\"] = recognizer.recognize_google(audio)\n except sr.RequestError:\n # API was unreachable or unresponsive \n response[\"success\"] = False\n response[\"error\"] = \"API unavailable\"\n except sr.UnknownValueError:\n # speech was unintelligible \n response[\"error\"] = \"Unable to recognize speech\"\n\n return response", "def recognize_speech_from_mic(recognizer, microphone):\n # check that recognizer and microphone arguments are appropriate type\n if not isinstance(recognizer, sr.Recognizer):\n raise TypeError(\"`recognizer` must be `Recognizer` instance\")\n\n if not isinstance(microphone, sr.Microphone):\n raise TypeError(\"`microphone` must be `Microphone` instance\")\n\n # adjust the recognizer sensitivity to ambient noise and record audio\n # from the microphone\n with microphone as source:\n recognizer.adjust_for_ambient_noise(source)\n audio = recognizer.listen(source)\n\n # set up the response object\n response = {\n \"success\": True,\n \"error\": None,\n \"transcription\": None\n }\n\n # try recognizing the speech in the recording\n # if a RequestError or UnknownValueError exception is caught,\n # update the response object accordingly\n try:\n response[\"transcription\"] = recognizer.recognize_google(audio, language=LANG)\n except sr.RequestError:\n # API was unreachable or unresponsive\n response[\"success\"] = False\n response[\"error\"] = \"API unavailable\"\n except sr.UnknownValueError:\n # speech was unintelligible\n response[\"error\"] = \"Unable to recognize speech\"\n\n return response", "def act(self, audio_file=None):\n #file as source\n if self.src == 'file':\n if audio_file is None:\n raise ValueError(\"Please provide a audio_file\")\n return None\n elif not os.path.exists(audio_file):\n raise FileNotFoundError(\"Specified file not found\")\n return None\n else:\n file = speech_recognition.AudioFile(audio_file)\n with file:\n speech = self.recog_obj.record(file)\n \n #mic as source\n elif self.src == 'mic':\n if audio_file is not None:\n print(\"WARNING: source is set to device microphone. Audio file will be ignored\\n\")\n \n try:\n with self.mic_obj:\n print(\"Speak into the mic....\\n\")\n self.recog_obj.adjust_for_ambient_noise(self.mic_obj)\n speech = self.recog_obj.listen(self.mic_obj)\n #if microphone is not detected\n except OSError:\n print(\"Error: Microphone not detected\")\n return None\n \n \n try:\n print(\"Please wait while we transcribe...\\n\")\n text = self.recog_obj.recognize_google(speech, language='en', show_all=self.debug)\n \n #if audio is not detected\n except speech_recognition.UnknownValueError:\n print(\"Error: Sorry audio not detected by device microphone\")\n return None\n \n #if there is connection issue or api issue\n except speech_recognition.RequestError:\n print(\"Error: API for transcription is not reachable. 
There may be some connection issue or server side issue\")\n return None\n \n #for imposing various rules to text \n #But if debug mode is enabled, transcript variable will store a dictionary of various transcriptions \n #along with their confidence probabilities, so conversion rules are disabled meanwhile \n transcript = self.tcr.deconcat(text) if not self.debug else text\n return transcript", "def on_vader_start(ob, message):\n text='\"Please start speaking\"'\n subprocess.call('espeak '+ text, shell=True)\n logging.debug(\"Listening...\")", "def main():\n\n # Play start sound\n play_wave_file(\"start.wav\")\n\n # Connect to Lego Boost\n hub = connect()\n\n # If hub works, starts the main app flow\n if hub:\n speech(\n \"Olá. Eu sou a Faustina, uma robô assistente do ueivespeisse. Em que posso ajudar?\", hub, {})\n while True:\n try:\n act({\"legoAction\": \"colorGreen\"}, hub)\n\n recorded_file = audio.record()\n\n act({\"legoAction\": \"colorRed\"}, hub)\n\n wit_response = wit_client.get_response(recorded_file)\n\n if wit_response[\"_text\"]:\n print(wit_response)\n answer = get_answer(wit_response)\n\n text = add_information_to_text(\n answer) if answer else \"Desculpa, nao entendi o que voce quis dizer\"\n\n speech(text, hub, answer)\n if answer:\n act(answer, hub)\n else:\n act({\"legoAction\": \"colorYellow\"}, hub)\n print(\"No sound detected\")\n time.sleep(2)\n except Exception as exception:\n print(exception)\n\n time.sleep(2)\n hub.motor_external.stop()", "def run(self):\n #use subprocess for your bindings when develop a new functionality\n fulldate = datetime.now().strftime(\"%A, %d. %B %Y %I:%M%p\")\n\n hours = datetime.now().strftime(\"%I\")\n minutes = datetime.now().strftime(\"%I\")\n\n if self.req_from == 'jabber':\n response = {'request': self.request\n ,'text' : fulldate\n ,'jmsg' : fulldate\n ,'continue' : 0\n ,'type':'response' }\n\n if self.req_from == 'julius':\n response = {'request': self.request\n ,'say': \"IT'S, %d O'CLOCK AND %d MINUTES\" % ( int(hours), int(minutes))\n ,'text' : fulldate\n ,'continue' : 0\n ,'type' : 'response' }\n\n return response\n #import subprocess\n #s = subprocess.Popen(['ffmpeg', '-i', speech, flac ] , stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate()[0]", "def start(self):\n\t\tcv2.waitKey(1)\n\t\ttext, _ = self.parse_response(self.sent_text())\n\t\tprint text\n\t\tself.speak(text)\n\t\twhile(True):\n\t\t\tuser_input = self.get_user_text_input()\n\t\t\tresponse = self.sent_text(message = user_input)\n\t\t\ttext, intent = self.parse_response(response)\n\n\t\t\tif response['output'].get('query') is not None:\n\t\t\t\tquery = str(response['output']['query'])\n\t\t\t\tself.speak('Looking for ' + query) \n\t\t\t\tself.speak('This might take a while')\n\t\t\t\tfound, image = process_video.loop_through_frames(label = query)\n\t\t\t\tif found:\n\t\t\t\t\tprint text\n\t\t\t\t\tself.speak(text)\n\t\t\t\t\tcv2.imshow(\"Here it is!\", image)\n\t\t\t\t\tcv2.waitKey()\n\t\t\t\telse:\n\t\t\t\t\tself.speak(\"I am sorry, I could not find what you were looking for\")\n\t\t\t\t\t\n\t\t\t\treturn\n\t\t\tself.speak(text)\n\t\t\t#if intent == 'Lost':\n\t\t\t#\tkey = response['entities'] \n\t\t\t#\tprint \"I am looking for: \" + key\n\t\t\tprint text", "def recognize_async_audio_stream(self, language_code = \"en-US\"): \n if language_code not in self.languages:\n print('\\\"{}\\\" is not a supported language code. 
Make sure it\\'s supported by Google and try adding adding it to the languages list.\\n'.format(language_code))\n return\n\n self.final_result_queue.queue.clear() # Clear all items in queue for new stream.\n\n config_stream = speech.StreamingRecognitionConfig(\n config = speech.RecognitionConfig(\n encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,\n sample_rate_hertz=self.microphone_handler.RATE,\n language_code=language_code,\n enable_automatic_punctuation=True,\n ),\n interim_results=True \n )\n\n self.microphone_handler.start_recording(streaming=True)\n while self.microphone_handler.streaming:\n data = self.microphone_handler.stream_generator()\n requests = (speech.StreamingRecognizeRequest(audio_content=content) for content in data)\n\n try:\n responses = self.client.streaming_recognize(config_stream, requests)\n for response in responses:\n self.final_result_queue.put(response.results[0])\n if response.results[0].is_final:\n return # Stops more recordings than one. Doesn't halt after recording is done. (temp)\n if self.debug:\n print(response.results[0].alternatives[0].transcript + '\\n') # Print all non final results in terminal(debug).\n except:\n print('Failed to get response.')", "def run(self):\n self.__engine = engine = pyttsx.init()\n if self.__voice_id is not None:\n engine.setProperty('voice', self.__voice_id)\n engine.connect('finished-utterance', self.__next_utterance)\n engine.say('Starting voice process')\n engine.startLoop()", "def listen(limb, hd):\n def callback(data):\n playback(limb, hd, str(data.data))\n\n rospy.Subscriber(\"handshake/play\", std_msgs.msg.String, callback)\n rospy.loginfo('listening...')\n rospy.spin()", "def main():\n\n # Parse arguments\n parser = OptionParser()\n parser.add_option('-n', '--subscription_key', dest='subscription_key',\n help='subscription_key for authentication')\n parser.add_option('-t', '--text', dest='text',\n help='text to synthesize')\n parser.add_option('-l', '--language', dest='language',\n help='language')\n parser.add_option('-g', '--gender', dest='gender',\n help='gender')\n parser.add_option('-d', '--directory', dest='directory',\n help='directory to store the file')\n (options, args) = parser.parse_args()\n subscription_key = options.subscription_key\n text = options.text\n language = options.language\n gender = options.gender\n directory = options.directory\n\n # Perform sanity checks on options\n validate_options(subscription_key, text)\n\n if not directory:\n directory = default_directory\n\n if not language:\n language = default_language\n\n if not gender:\n gender = default_gender\n\n # format = 'riff-16khz-16bit-mono-pcm'\n format = 'riff-8khz-8bit-mono-mulaw'\n\n # lang = 'en-AU'\n # gender = 'Female'\n tts_msspeak = MSSpeak(subscription_key, '/tmp/')\n tts_msspeak.set_cache(False)\n output_filename = tts_msspeak.speak(text, language, gender, format)\n\n print 'Recorded TTS to %s%s' % (directory, output_filename)", "def speak():\n sentences = ['DESTROY ALL HU- I MEAN GREETINGS MEAT BAG',\n 'She sells sea shells by the sea shore', 'Other sentence']\n while True:\n AUDIO.speak(sentences[randint(0, 2)])\n sleep(15)", "def hello_monkey():\n resp = twilio.twiml.Response()\n resp.say(\"Hello I'm a indie go go Robot, if you want to make an appointment with a real life human please text this number instead to get this party started\")\n resp.play(\"http://linode.rabasa.com/cantina.mp3\")\n \n return str(resp)", "def start(self):\n while True:\n requests.get(\"http://localhost:8080/clear\") #clearing the screen on 
the web browser\n speech=\"Welcome to Smart Mirror !!\"\n requests.get(\"http://localhost:8080/statement?text=%s\" % speech) # calling the text to appear on the browser\n self.speech.synthesize_text(\"hello\"+speech) #synthesizing the text into speech\n speech1=\"Say The launch Phrase .\" #asking the user to say the lauch phrase\n self.speech.synthesize_text(speech1) #speaking of the above line,\n if self.vision.recognize_face(): #checking if\n print \"Face Found\"\t\t\t#the person is infront of camera\n if use_launch_phrase:\t\t\t#checking whether to use the launch phrase or not\n recognizer, audio = self.speech.listen_for_audio()\t\t#initializing\n if self.speech.is_call_to_action(recognizer, audio):\t#checking if the audio is recognized\n self.__acknowledge_action()\t\t\t#if it is recognized take action\n self.decide_action()\t\t\t#deciding which action to be taken\n else:\n self.decide_action()\t\t\t#printing the else part", "def speak(self, *args, **kwargs):\r\n if self.config.get('accessibility', 'speak'):\r\n t = threading.Thread(name = 'Speech thread', target = accessibility.system.speak, args = args, kwargs = kwargs)\r\n t.start()\r\n return t\r\n else:\r\n return False", "def listen_for_speech(threshold=THRESHOLD):\n\n #Open stream\n p = pyaudio.PyAudio()\n\n stream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n frames_per_buffer=CHUNK)\n\n print(\"* recording\")\n\n frames = []\n\n for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n\tdata = stream.read(CHUNK)\n frames.append(data)\n\n print(\"* done recording\")\n\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\n wf.setnchannels(CHANNELS)\n wf.setsampwidth(p.get_sample_size(FORMAT))\n wf.setframerate(RATE)\n wf.writeframes(b''.join(frames))\n wf.close()\n \n response = stt_google_wav(WAVE_OUTPUT_FILENAME)\n\n return response", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, input_device_index=0, channels=1, rate=RATE, input=True, output=True, frames_per_buffer=CHUNK_SIZE)\n num_silent = 0\n snd_started = False\n\n r = array('h')\n while 1:\n snd_data = array('h', stream.read(CHUNK_SIZE, exception_on_overflow = False))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n print(\"Sound started.\")\n snd_started = True\n\n if snd_started and num_silent> 10:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n #r = trim(r)\n #r = add_silence(r, 0.5)\n return sample_width, r", "def run(self):\n # get frame of mic samples\n if self.__stream is None:\n # callin code needs to open stream 1st\n return False\n else:\n # wait for sufficient sample, else pitch plot crashes\n if self.__recorded >= FRAMES_PER_BUFFER:\n # new file if time exceeded\n self.__openSampleFile()\n return True\n else:\n # not enough samples\n return False", "def speech_language_detection_once_from_mic():\n # <SpeechLanguageDetectionWithMicrophone>\n # Creates an AutoDetectSourceLanguageConfig, which defines a number of possible spoken languages\n auto_detect_source_language_config = \\\n speechsdk.languageconfig.AutoDetectSourceLanguageConfig(languages=[\"de-DE\", \"en-US\"])\n\n # Creates a SpeechConfig from your speech key and region\n speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)\n\n # Creates a source 
language recognizer using microphone as audio input.\n # The default language is \"en-us\".\n speech_language_detection = speechsdk.SourceLanguageRecognizer(\n speech_config=speech_config, auto_detect_source_language_config=auto_detect_source_language_config)\n\n print(\"Say something in English or German...\")\n\n # Starts speech language detection, and returns after a single utterance is recognized. The end of a\n # single utterance is determined by listening for silence at the end or until a maximum of 15\n # seconds of audio is processed. It returns the recognition text as result.\n # Note: Since recognize_once() returns only a single utterance, it is suitable only for single\n # shot recognition like command or query.\n # For long-running multi-utterance recognition, use start_continuous_recognition() instead.\n result = speech_language_detection.recognize_once()\n\n # Check the result\n if result.reason == speechsdk.ResultReason.RecognizedSpeech:\n detected_src_lang = result.properties[\n speechsdk.PropertyId.SpeechServiceConnection_AutoDetectSourceLanguageResult]\n print(\"Detected Language: {}\".format(detected_src_lang))\n elif result.reason == speechsdk.ResultReason.NoMatch:\n print(\"No speech could be recognized\")\n elif result.reason == speechsdk.ResultReason.Canceled:\n cancellation_details = result.cancellation_details\n print(\"Speech Recognition canceled: {}\".format(cancellation_details.reason))\n if cancellation_details.reason == speechsdk.CancellationReason.Error:\n print(\"Error details: {}\".format(cancellation_details.error_details))\n # </SpeechLanguageDetectionWithMicrophone>", "def test_recognize(self):\n\n rec = mi.MyRecognizeCallback()\n rec.on_close()\n rec.on_connected()\n rec.on_data('\"final\": true truetestd')\n rec.on_error(\"testerror\")\n rec.on_hypothesis(\"testh\")\n rec.on_inactivity_timeout(\"testerrorinac\")\n rec.on_listening()\n rec.on_transcription(\"testtr\")\n self.assertIsNotNone(rec)", "def main():\n # transcribe_audio()\n summarize()", "def speech_callback(self, data):\n speech = data.data\n print \"RECEIVED SPEECH: \", speech\n if \"keyword detected\" in speech:\n if self.idling:\n self.control_pub.publish(\"ft go; idle stop; stt go\")\n self.behav_pub.publish(\"greet\")\n # self.behav_pub.publish(random.choice(categorized_behaviors['greeting']))\n elif \"play\" in speech:\n print \"STARTING GAME\"\n self.start_game = \"TTT\"\n elif \"bye\" in speech:\n self.control_pub.publish(\"idle go; stt go; stt_keyword go\")\n elif \"okay\" in speech:\n self.ok = True", "def start_prompt(message):\n reply = ' '.join((\n \"Press and hold screen button with microphone picture.\",\n \"Say your phrase and release the button.\",\n ))\n return bot.reply_to(message, reply)", "def speak(self):\n print(\"hello\")", "def record():\n # Start our TwiML response\n response = VoiceResponse()\n\n # Use <Say> to give the caller some instructions\n response.say('Ahoy! 
Call recording starts now.')\n\n # Use <Record> to record the caller's message\n response.record()\n\n # End the call with <Hangup>\n response.hangup()\n\n return str(response)", "def start(update, context) -> None:\n global ANSWERS, id_question\n ANSWERS = []\n\n id_question = 0\n update.message.reply_text(\n 'Please select /question to get the poll'\n )", "def run(self):\n from audio import AudioRecorder\n\n loader = SingleInputLoader(128)\n recorder = AudioRecorder()\n\n with tf.Session() as sess:\n model = create_default_model('record', 128, loader)\n model.restore(sess, 'train/best-weights')\n \n while True:\n print('Listening...')\n audio, width = recorder.record()\n audio = np.array(audio)\n\n #calculate the power spectrum of the audio and of sampling rate 16000 \n input_ = preprocess.calculatePowerSpectrogram(audio, 16000)\n\n loader.set_input(input_)\n [decoded] = model.step(sess, loss=False, update=False, decode=True)\n\n decoded_ids_paths = [Test.extract_decoded_ids(path) for path in decoded]\n \n for decoded_path in decoded_ids_paths:\n decoded_ids = next(decoded_path)\n decoded_str = self.idsToSentence(decoded_ids)\n print('Predicted: {}'.format(decoded_str))", "def text_to_speech(entry):\n text = entry.get_text()\n if text:\n subprocess.call([\"milena_say\", text])", "def start_audio_relay(self):\n try:\n self.add_audio_client()\n receive_audio_client_socket, address = \\\n self.receive_audio_socket.accept()\n print(\"connected relay audio\")\n name = self.receive_mes(receive_audio_client_socket)\n self.send_chunk(\"calling\".encode(), receive_audio_client_socket)\n while name not in self.client_audio_dict:\n time.sleep(TIME_SLEEP)\n print(\"waiting for the other client to connect\")\n self.send_chunk(\"wait\".encode(), receive_audio_client_socket)\n self.send_chunk(\"start\".encode(), receive_audio_client_socket)\n send_sock = self.client_audio_dict[name]\n self.receive_and_send_audio(receive_audio_client_socket, send_sock)\n except socket.error as e:\n print(\"socket audio relay fail: \", e)\n self.close_all()\n except Exception as e:\n print(\"audio relay exception: \", e)\n self.close_all()", "def handle_recording():\n \n recording_url = request.values.get(\"RecordingUrl\", None)\n \n resp = twilio.twiml.Response()\n resp.say(\"Thanks for howling... take a listen to what you howled.\")\n resp.play(recording_url)\n resp.say(\"Goodbye.\")\n return str(resp)", "def launch_request_handler(handler_input):\n speech_text = \"Hello! Are you looking to connect and play with others?\"\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Hello! Are you looking to connect and play with others?\", speech_text)).set_should_end_session(False)\n return handler_input.response_builder.response", "def hear_computer():\n psphinx_process = subprocess.Popen(\n pocketsphinx_commands,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n speak(\"Listener started. Say 'computer' to activate.\")\n time.sleep(0.3)\n while 1:\n # read each line of output, strip the newline and index numbers\n psphinx_output = psphinx_process.stdout.readline().rstrip(b'\\n').decode()\n print(psphinx_output)\n if 'computer' in psphinx_output.lower():\n # kill continuous listening so it does not trigger during listen()\n psphinx_process.kill()\n speak(\"Understood 'computer'. 
Stopping program.\")\n sys.exit(0)\n # restart continuous listening\n psphinx_process = subprocess.Popen(\n pocketsphinx_commands,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n sys.stdout.flush()", "def handle(text, mic, profile):\n messages = [\"Neustart wird eingeleitet, bis gleich. \"]\n\n message = random.choice(messages)\n\n mic.say(message)\n os.system(\"sudo reboot\")", "def ProcessCall(self, Call):\n Call.MarkAsSeen()\n\n self.SayByVoice(Call, self.Messages['welcome_phrase'] % Call.PartnerDisplayName)\n self.SayByVoice(Call, self.Messages['tell_rules'])\n\n # record wav file with buddy's speech\n TemporaryFileWAV = tempfile.NamedTemporaryFile(prefix= Call.PartnerHandle +\"_record_\", suffix=\".wav\", delete=False)\n TemporaryFileWAV.close()\n\n Call.OutputDevice(Skype4Py.callIoDeviceTypeFile, TemporaryFileWAV.name)\n\n # give 10 seconds for user to speak\n time.sleep(10)\n\n # terminate speech recording\n Call.OutputDevice(Skype4Py.callIoDeviceTypeFile, None)\n\n self.SayByVoice(Call, self.Messages['10_seconds_passed'])\n\n # convert wav into the flac using http://flac.sourceforge.net/ binary\n ChromeRecognizeURL = \"https://www.google.com/speech-api/v1/recognize?xjerr=1&client=chromium&lang=en-US\"\n ConvertCommand = \"flac --channels=1 --sample-rate=16000 %s\" % TemporaryFileWAV.name\n subprocess.call(ConvertCommand)\n\n TemporaryFileFlacName = TemporaryFileWAV.name.replace('.wav','.flac')\n TemporaryFileFlac = open(TemporaryFileFlacName,\"rb\")\n\n # send flac to the google recognize API (warning, this API is unofficial, use only for testing)\n GoogleRecognizeAPIURL = \"https://www.google.com/speech-api/v1/recognize?xjerr=1&client=chromium&lang=en-US\"\n GoogleRecognizeRequest = urllib2.Request(GoogleRecognizeAPIURL, TemporaryFileFlac.read(), {'Content-Type': 'audio/x-flac; rate=16000'})\n DataAnswer = json.loads(urllib2.urlopen(GoogleRecognizeRequest).read())\n\n TemporaryFileFlac.close()\n\n # closest variant is always first in results\n if len(DataAnswer['hypotheses']) > 0:\n ClosestVariant = DataAnswer['hypotheses'][0]['utterance']\n else:\n ClosestVariant = \"nothing\"\n\n self.SayByVoice(Call, \"You asked: %s\" % ClosestVariant)\n self.SayByVoice(Call, \"My answer is: %s\" % self.AI.respond(ClosestVariant, Call.PartnerHandle))\n self.SayByVoice(Call, \"Goodbye!\")\n\n # clean rubbish and finish the call\n os.remove(TemporaryFileWAV.name)\n os.remove(TemporaryFileFlacName)\n\n Call.Finish()", "def test_init(self):\n mic = mi.MicrophoneToText()\n\n self.assertTrue(mic.switch)\n self.assertIsNotNone(mic.resultkeywords)\n self.assertIsNotNone(mic.result)\n self.assertIsNotNone(mic.keywordsshort)\n # tests also chunk and maxbuffer\n self.assertIsNotNone(mic.q)\n self.assertIsNotNone(mic.keywords)\n self.assertIsNotNone(mic.resultkeywords)\n self.assertIsNotNone(mic.speech_to_text)\n # tests also audio, format, channel and rate\n self.assertIsNotNone(mic.stream)\n self.assertIsNotNone(mic.audio_source)", "def microbit_process(pipe):\n gamepad_listener = MicroBitListener(pipe)\n gamepad_listener.listen()", "def handle_recording():\n\n recording_url = request.values.get(\"RecordingUrl\", None)\n\n resp = VoiceResponse()\n resp.say(\"Listen to your recorded message.\")\n resp.play(recording_url)\n resp.say(\"Goodbye.\")\n return str(resp)", "def handle_key():\n #Get the digit pressed by the user\n digit_pressed = request.values.get('Digits',None)\n if digit_pressed ==\"1\":\n resp = twilio.twiml.Response()\n resp.say(\"It's a trap!\")\n 
resp.play(\"http://demo.twilio.com/hellomonkey/monkey.mp3\")\n return str(resp)\n \n elif digit_pressed ==\"2\":\n resp = twilio.twiml.Response()\n resp.say(\"Record your howl after the tone for Claire please.\")\n resp.record(maxLength=\"30\",action = \"/handle-recording\")\n return str(resp)\n else: \n return redirect(\"/\")", "def ask(self):\n subprocess.run([\"say\", \"-v\", \"Kyoko\", str(self.answer)])", "def initAudio(self):\n\t\t# Initialize pitch detection\n\t\tself.listener = PitchDetect(channels=1)\n\t\tself.listener.listen()\n\t\tself.recording = False\n\t\tself.paused = False", "def voiceSearch(self):\n\n # Set the device ID of the mic that we\n # specifically want to use to avoid ambiguity\n for i, microphone_name in enumerate(\n sr.Microphone.list_microphone_names()):\n if(microphone_name == MIC_NAME):\n device_id = i\n break\n\n # obtain audio from the microphone\n r = sr.Recognizer()\n with sr.Microphone(device_index=device_id) as source:\n # clear console of errors\n subprocess.run(\"clear\")\n\n # wait for a second to let the recognizer adjust the\n # energy threshold based on the surrounding noise level\n r.adjust_for_ambient_noise(source)\n\n print(\"Say something to search for books: \")\n try:\n audio = r.listen(source, timeout=1.5)\n except sr.WaitTimeoutError:\n return None\n\n # recognize speech using Google Speech Recognition\n translation = None\n try:\n translation = r.recognize_google(audio)\n except(sr.UnknownValueError, sr.RequestError):\n pass\n finally:\n return translation", "def handle_speak(event):\n context = {'client_name': 'mycroft_listener',\n 'source': 'audio',\n 'destination': [\"skills\"]}\n bus.emit(Message('speak', event, context))", "def main():\n\ttoken = os.getenv(\"BOT_TOKEN\")\n\tapplication = Application.builder().token(token).read_timeout(30).write_timeout(30).build()\n\tload_interactions(application)\n\tprint(\"Simple Media Converter instance started!\")\n\tapplication.run_polling()", "def handle(text, mic, profile):\n if 'motion' not in profile or 'binary' not in profile['motion'] or 'runfile' not in profile['motion']:\n mic.say('Motion does not seem to be set-up correctly.')\n mic.say('Please add motion binary and motion runfile configuration options to you profile.')\n return\n runfile = profile['motion']['runfile']\n binary = profile['motion']['binary']\n responses = ['Hey, something is wrong. I am not supposed to say this.']\n if bool(re.search(r'\\bstop\\b', text, re.IGNORECASE)):\n if os.path.isfile(runfile):\n stopMotion(runfile)\n responses = ['Have it your way.', 'Enjoy your privacy.', 'I will just close my eyes for a second.', 'You are not that interesting anyway.']\n else:\n responses = ['I was not looking at you.', 'You are delusional, nobody is watching.', 'It was not me. 
It was the N S A.']\n elif bool(re.search(r'\\bstart\\b', text, re.IGNORECASE)):\n if os.path.isfile(runfile):\n responses = ['Did you think I was not paying attention?', 'I am already watching.', 'I have been on guard duty for a while already.']\n else:\n startMotion(binary)\n responses = ['I will keep an eye on things.', 'I will guard this room.', 'I will keep careful watch.', 'I will keep my eyes wide open.']\n mic.say(random.choice(responses))", "def speak(self):\n print(\"meow!\")", "def speak(message):\n print(message)", "def recorder():\n # Following block gets rid of annoying config errors by ALSA\n def py_error_handler(filename, line, function, err, fmt):\n pass\n ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p)\n c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)\n asound = cdll.LoadLibrary('libasound.so')\n asound.snd_lib_error_set_handler(c_error_handler) \n\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n yield stream\n\n stream.stop_stream()\n stream.close()\n p.terminate()", "async def app_say() -> Response:\n voice = request.args.get(\"voice\", \"\")\n assert voice, \"No voice provided\"\n\n # cache=false or cache=0 disables WAV cache\n use_cache = request.args.get(\"cache\", \"\").strip().lower() not in {\"false\", \"0\"}\n\n # Text can come from POST body or GET ?text arg\n if request.method == \"POST\":\n text = request.data.decode()\n else:\n text = request.args.get(\"text\")\n\n assert text, \"No text provided\"\n\n vocoder = request.args.get(\"vocoder\")\n denoiser_strength = request.args.get(\"denoiserStrength\")\n if denoiser_strength is not None:\n denoiser_strength = float(denoiser_strength)\n\n wav_bytes = await text_to_wav(\n text,\n voice,\n vocoder=vocoder,\n denoiser_strength=denoiser_strength,\n use_cache=use_cache,\n )\n\n return Response(wav_bytes, mimetype=\"audio/wav\")", "def process_audio(self, voice_rec_array):\n \n if self.use_voice:\n \n voice = voice_rec_array[0]\n \n if self.prev_voice_command != voice:\n \n print(f'voice command: {voice}')\n\n if 'takeoff' in voice:\n self.drone.takeoff() \n print('takeoff')\n\n if 'land' in voice:\n if 'palm' in voice:\n print('palmland')\n self.palm_land_approach()\n\n else:\n self.toggle_tracking(False)\n # self.tracking = False\n self.drone.land()\n self.drone.quit()\n cv2.destroyAllWindows() \n os._exit(0)\n \n \n if 'tracking' in voice:\n if 'no' in voice:\n self.tracking = False\n else:\n self.tracking = True\n\n\n if 'distance' in voice:\n if 'off' in voice:\n self.distance_mode = False\n self.keep_distance = None\n else:\n self.distance_mode = True\n \n\n if 'picture' in voice:\n print('picture in command')\n self.picture_target = re.findall(\"pic_target\\s:\\s'([a-zA-Z\\s]+)\",voice)[0].replace('the ','')\n self.toggle_tracking(tracking=False)\n print('tracking off')\n self.picture_approach = True\n self.target_height = None\n self.search_start_time = time.time()\n \n if 'come' in voice:\n self.rth = True\n\n self.prev_voice_command = voice\n\n # if 'move' in voice:\n # amount = None\n # amount = int(re.findall('[0-9]{2}', voice)[0])\n # print(amount)\n\n # if amount is not None:\n\n # if amount > 30:\n # amount = 30\n # try:\n # if 'forwards' in voice:\n # self.drone.forward(amount)\n \n # if 'backwards' in voice:\n # self.drone.backward(amount)\n\n # if 'left' in voice:\n # self.drone.left(amount)\n\n # if 'right' in voice:\n # self.drone.right(amount)\n\n # 
self.move_timestamp = time.time()\n\n\n # except:\n # print('not possible')\n \n # self.prev_voice_command = voice", "async def run(self):\n self.add_msg(\"Type your nickname\")\n # Start the new thread that will listen to responses, while the main thread is sending answers\n start_new_thread(self.listenToRespone, ())", "def introduction():\n print(\"Hello, this is a simple program that will convert your input\")\n print(\"into an audio file and play it for you.\")\n user_input = input(\"What audio would you like to hear? \")\n print(\"What language would you like?\")\n print(\"For Arabic enter: 'arb'\")\n print(\"For Chinese enter: 'cmn-CN'\")\n print(\"For Danish enter: 'da-DK'\")\n print(\"For English enter: 'es-US'\")\n print(\"For French enter: 'fr-FR'\")\n print(\"For German enter: 'de-DE'\")\n print(\"For Portuguese enter: 'pl-PT'\")\n print(\"For Spanish enter: 'es-ES'\")\n get_language = input(\"Enter your choice: \")\n if validate_lang(get_language):\n user_language = get_language\n print(\"Success, your language is: \" + LANGUAGE_OPTIONS[get_language])\n else:\n user_language = \"es-US\"\n print(\"Input is not valid, language set to English.\")\n\n return user_input, user_language", "def startKuri(self):\n if self.option == 'c':\n self.useChat()\n elif self.option == 's':\n self.useSpeech()", "def handle_input_from_klat(message):\n audio_file = message.data.get(\"raw_audio\")\n nick = message.data.get(\"user\")\n loop.consumer.chat_user_database.update_profile_for_nick(nick)\n chat_user = loop.consumer.chat_user_database.get_profile(nick)\n stt_language = chat_user[\"speech\"].get('stt_language', 'en')\n request_id = f\"sid-{message.data.get('sid')}-{message.data.get('socketIdEncrypted')}-\" \\\n f\"{nick}-{message.data.get('nano')}\" # Formerly known as 'flac_filename'\n\n try:\n nick_profiles = loop.consumer.chat_user_database.get_nick_profiles(message.data.get(\"cid_nicks\"))\n except TypeError:\n nick_profiles = loop.consumer.chat_user_database.get_nick_profiles([nick])\n mobile = message.data.get(\"nano\") == \"mobile\"\n if mobile:\n client = \"mobile\"\n elif message.data.get(\"nano\") == \"true\":\n client = \"nano\"\n else:\n client = \"klat\"\n ident = time.time()\n\n LOG.debug(audio_file)\n if audio_file:\n try:\n audio_data, audio_context, transcriptions = _get_stt_from_file(audio_file, stt_language)\n # segment = AudioSegment.from_file(audio_file)\n # audio_data = AudioData(segment.raw_data, segment.frame_rate, segment.sample_width)\n # LOG.debug(\"Got audio_data\")\n # audio, audio_context = loop.responsive_recognizer.audio_consumers.get_context(audio_data)\n # LOG.debug(f\"Got context: {audio_context}\")\n # audio_context[\"user\"] = nick\n\n if message.data.get(\"need_transcription\"):\n # transcriptions = loop.consumer.transcribe(audio) # flac_data for Google Beta STT\n LOG.debug(f\"return stt to server: {transcriptions}\")\n bus.emit(Message(\"css.emit\", {\"event\": \"stt from mycroft\",\n \"data\": [transcriptions[0], request_id]}))\n # else:\n # # transcriptions = [message.data.get(\"shout_text\")]\n except Exception as x:\n LOG.error(x)\n transcriptions = [message.data.get(\"shout_text\")]\n audio_context = None\n elif message.data.get(\"need_transcription\"):\n LOG.error(f\"Need transcription but no audio passed! 
{message}\")\n return\n else:\n audio_context = None\n transcriptions = [message.data.get(\"shout_text\")]\n\n if not transcriptions:\n LOG.warning(f\"Null Transcription!\")\n return\n\n data = {\n \"utterances\": transcriptions,\n \"lang\": stt_language\n }\n context = {'client_name': 'mycroft_listener',\n 'source': 'klat',\n 'destination': [\"skills\"],\n \"audio_parser_data\": audio_context,\n \"raw_audio\": message.data.get(\"raw_audio\"),\n \"mobile\": mobile, # TODO: Depreciate and use client DM\n \"client\": client, # origin (local, klat, nano, mobile, api)\n \"klat_data\": {\"cid\": message.data.get(\"cid\"),\n \"sid\": message.data.get(\"sid\"),\n \"title\": message.data.get(\"title\"),\n \"nano\": message.data.get(\"nano\"),\n \"request_id\": request_id},\n # \"flac_filename\": flac_filename,\n \"neon_should_respond\": False,\n \"username\": nick,\n \"nick_profiles\": nick_profiles,\n \"cc_data\": {\"speak_execute\": transcriptions[0],\n \"raw_utterance\": transcriptions[0]}, # TODO: Are these necessary anymore? Shouldn't be DM\n \"timing\": {\"start\": message.data.get(\"time\"),\n \"transcribed\": time.time()},\n \"ident\": ident\n }\n LOG.debug(\"Send server request to skills for processing\")\n _emit_utterance_to_skills(Message('recognizer_loop:utterance', data, context))", "async def on_start(objs, event):\n channel = objs['channel']\n await channel.answer()\n playback = await channel.play(media='sound:demo-congrats')\n\n async def on_dtmf(channel, event):\n \"\"\"Callback for DTMF events.\n\n DTMF events control the playback operation.\n\n :param channel: Channel DTMF was received on.\n :param event: Event.\n \"\"\"\n # Since the callback was registered to a specific channel, we can\n # control the playback object we already have in scope.\n # TODO: if paused: unpause before doing anything else\n digit = event['digit']\n if digit == '5':\n await playback.control(operation='pause')\n elif digit == '8':\n await playback.control(operation='unpause')\n elif digit == '4':\n await playback.control(operation='reverse')\n elif digit == '6':\n await playback.control(operation='forward')\n elif digit == '2':\n await playback.control(operation='restart')\n elif digit == '#':\n await playback.stop()\n await channel.continueInDialplan()\n else:\n print >> sys.stderr, \"Unknown DTMF %s\" % digit\n\n channel.on_event('ChannelDtmfReceived', on_dtmf)", "def run(self):\n self.p = pyaudio.PyAudio()\n self.stream = self.p.open(format=pyaudio.paFloat32, channels=self.CHANNELS, rate=self.RATE, input=True,\n output=False, stream_callback=self.callback)\n self.stream.start_stream()\n self.stop.setSingleShot(True)\n self.stop.start()", "def handle_record_begin():\n LOG.info(\"Begin Recording...\")\n context = {'client_name': 'mycroft_listener',\n 'source': 'audio',\n 'destination': [\"skills\"]}\n bus.emit(Message('recognizer_loop:record_begin', context=context))", "def func(self):\n if not self.raw:\n self.msg(\"Say what?\")\n return\n options = {\"is_pose\": True}\n speech = self.raw.lstrip(\" \")\n # calling the speech hook on the location\n speech = self.caller.location.at_say(speech)\n # Feedback for the object doing the talking.\n langstring = \"\"\n current = self.caller.languages.current_language\n if current and current.lower() != \"arvani\":\n langstring = \" in %s\" % current.capitalize()\n options.update({\"language\": current, \"msg_content\": speech})\n self.msg(\n 'You say%s, \"%s{n\"' % (langstring, speech),\n from_obj=self.caller,\n options=options,\n )\n # Build the string to emit 
to neighbors.\n pre_name_emit_string = ' says%s, \"%s{n\"' % (langstring, speech)\n self.caller.location.msg_action(\n self.caller, pre_name_emit_string, exclude=[self.caller], options=options\n )\n self.caller.posecount += 1", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > 30:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return sample_width, r", "def start_recording(self) -> None:\n # Clear the internal ring buffer.\n self._buffer.fill(0)\n\n # Start recording using sounddevice's InputStream.\n self._stream.start()", "def input_audio_icon():\n if use_espeak_synthesis:\n os.system(\"espeak \\\"Type in\\\"\")", "def start(self):\n\tglobal mode\n\tmode=\"./music/\"\n\tglobal message\n\tif message!=2:\n\t\tmessage=1\n\t\tbot.loop.create_task(play())", "def polly_request_speech(intext: str, intlanguage: str):\n session = Session(profile_name=\"default\")\n polly = session.client(\"polly\")\n try:\n response = polly.synthesize_speech(Text=intext,LanguageCode = intlanguage,OutputFormat=\"mp3\",VoiceId=\"Joanna\")\n print(response)\n except (BotoCoreError, ClientError) as error:\n print(error)\n sys.exit(1)\n return response", "def make_phone_call(self):\n client = Client(account_sid, auth_token)\n\n call = client.calls.create(\n url='http://demo.twilio.com/docs/classic.mp3',\n to=self.emergency_number,\n from_='+16505499680'\n )\n\n print(call.sid)", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > SILENCE:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return sample_width, r" ]
[ "0.71735287", "0.70460767", "0.702662", "0.69203657", "0.69143", "0.6879274", "0.68592507", "0.6835375", "0.6740074", "0.66967285", "0.6650779", "0.65125155", "0.6443816", "0.6381483", "0.6351039", "0.62422884", "0.6159692", "0.61501604", "0.6081473", "0.60642445", "0.6046894", "0.6045098", "0.6038042", "0.5988153", "0.5970528", "0.59702027", "0.5962688", "0.5960065", "0.59313154", "0.59036994", "0.59007585", "0.58699214", "0.5845129", "0.5832543", "0.5812032", "0.5812032", "0.57315975", "0.5702337", "0.56739855", "0.5668807", "0.5662943", "0.565569", "0.5637141", "0.5613427", "0.5543779", "0.5523898", "0.5511808", "0.55067056", "0.55011564", "0.5490285", "0.5484505", "0.5484359", "0.54791516", "0.5476712", "0.5471593", "0.5470302", "0.5465936", "0.546359", "0.54554754", "0.5450286", "0.54405904", "0.54242086", "0.54171205", "0.5406958", "0.54047704", "0.53939784", "0.53759784", "0.5368344", "0.53650504", "0.53581655", "0.53501874", "0.5348697", "0.5345673", "0.5340583", "0.533047", "0.53294116", "0.53100187", "0.52801573", "0.52607495", "0.525842", "0.5250861", "0.5246845", "0.524628", "0.5235746", "0.5215071", "0.52082634", "0.52041376", "0.52028453", "0.519882", "0.5195328", "0.51946735", "0.51938134", "0.5192588", "0.5186561", "0.5174667", "0.51564246", "0.5156399", "0.51514953", "0.5148814", "0.5147064" ]
0.5937994
28
Triggers a manual build of the project.
async def trigger_build(self, *, branch=None, message=None):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trigger_build(self, postdata):\n pass", "def build(self):\n logging.info('Build %s of %s (%s)', self._build, self.name,\n self.working_dir)\n self._build += 1\n self._event = None\n status = self._builder.execute_script(self.working_dir, self.script)\n self._show_notification(status)", "def TriggerBuild(self):\n readthedocs_url = u'https://readthedocs.org/build/{0:s}'.format(\n self._project)\n\n try:\n self._url_lib_helper.Request(readthedocs_url, post_data=b'')\n\n except errors.ConnectionError as exception:\n logging.warning(u'{0!s}'.format(exception))\n return False\n\n return True", "def post_build_project(project_data):\n ListenerManager.call(_project_post_build_manager, project_data)", "def build(self):\n self.puts(colored.blue(\"Building project...\"))\n\n if os.path.exists(self.build_path):\n shutil.rmtree(self.build_path)\n os.makedirs(self.build_path)\n\n with indent(2):\n self._reset_build_sequence_id()\n self._build_pre_project_template()\n self._build_project_template()\n self._build_pre_resources_template()\n self._build_resources_template()\n self._build_post_resources_template()", "def pre_build_project(project_data):\n ListenerManager.call(_project_pre_build_manager, project_data)", "def monitor_project_build(self, project_name):\n pass", "def force(self, **kwargs):\n log.info(\"Forcing a build\")\n self._force = True", "def build_trigger(ctx, build_type_id, branch, comment, parameter, agent_id,\n open_build_log, wait_for_run):\n parameters = dict([p.split('=', 1) for p in parameter])\n data = ctx.obj.trigger_build(\n build_type_id=build_type_id,\n branch=branch,\n comment=comment,\n parameters=parameters,\n agent_id=agent_id)\n build_id = data['id']\n ctx.invoke(build_queue_show, args=[build_id])\n if open_build_log:\n url = data['webUrl'] + '&tab=buildLog'\n webbrowser.open(url)\n if not wait_for_run:\n return\n while data['state'] == 'queued':\n data = ctx.obj.get_queued_build_by_build_id(build_id)\n click.echo('state: %s' % data['state'])\n time.sleep(1)\n ctx.invoke(build_queue_show, args=[build_id])", "def force(self):\n print \"Forcing a build by touching files\"\n os.chdir(self.version.project.conf_dir(self.version.slug))\n os.system('touch * && touch */*')", "def autoBuild (self, event = None):\r\n if self.autobuildmenuitem.IsChecked():\r\n self.autobuildtimer.Start(5000)\r\n self.autoBuildStart();\r\n else:\r\n self.autobuildtimer.Stop()", "def build_step(self):\n run_cmd('./compile.sh', log_all=True, simple=True, log_ok=True)", "def run(self):\n self.scion_sh('run', 'nobuild')", "def test_build(self):\n self.createFakeSphinxProject()\n self.builder.build(self.sphinxDir)\n self.verifyBuilt()", "def autoBuildTick (self, event = None):\r\n for pathname, oldmtime in self.autobuildfiles.iteritems():\r\n newmtime = os.stat(pathname).st_mtime\r\n if newmtime != oldmtime:\r\n #print \"Auto rebuild triggered by: \", pathname\r\n self.autobuildfiles[pathname] = newmtime\r\n self.rebuild()\r\n break", "def start_build(args):\n\n path = os.path.join(SCRATCH_DIR, args.project)\n \n # Set up virtual environment\n print(\"Setting up virtual python environment in %s\" % path)\n venv.create(path, clear=True, symlinks=True, with_pip=False)\n\n # Pull in repository data\n sourcepath = os.path.join(path, 'source')\n print(\"Cloning from git repository %s (branch: %s)\" % (args.source, args.sourcebranch))\n subprocess.run((GIT, 'clone', '--branch', args.sourcebranch, '--depth=1', '--no-single-branch', args.source, sourcepath),\n check=True)\n\n # Activate venv and install 
pips if needed. For dev/test, we will\n # assume that all requirements are available at the system level,\n # rather than needing to install them into the venv.\n ### note: this makes it difficult to test requirements.txt, but it\n ### will do for now. Debugging requirements.txt failures on the\n ### production buildbot is not difficult to correct.\n if IS_PRODUCTION and os.path.exists(os.path.join(sourcepath, 'requirements.txt')):\n print(\"Installing pips\")\n subprocess.run(('/bin/bash', '-c',\n 'source bin/activate; pip3 install -r source/requirements.txt'),\n cwd=path, check=True)\n else:\n print(\"On dev/test requirements.txt is not processed, skipping pip\")\n\n # Where are our tools?\n if IS_PRODUCTION:\n tool_dir = PELICANFILES\n else:\n tool_dir = THIS_DIR\n print(\"TOOLS:\", tool_dir)\n\n pelconf_yaml = os.path.join(sourcepath, AUTO_SETTINGS_YAML)\n if os.path.exists(pelconf_yaml):\n settings_path = os.path.join(path, AUTO_SETTINGS)\n if IS_PRODUCTION:\n builtin_plugins = PLUGINS\n else:\n builtin_plugins = os.path.join(tool_dir, os.pardir, 'plugins')\n generate_settings(pelconf_yaml, settings_path, [ builtin_plugins ], sourcepath)\n else:\n # The default name, but we'll pass it explicitly.\n settings_path = os.path.join(sourcepath, 'pelicanconf.py')\n\n # Set currently supported plugins\n ### this needs to be removed, as it is too indeterminate.\n with open(settings_path, 'a') as f:\n f.write(\"\"\"\ntry:\n PLUGINS += ['toc']\nexcept:\n PLUGINS = ['toc', 'gfm']\n\"\"\")\n\n # Call pelican\n buildpath = os.path.join(path, 'build/output')\n os.makedirs(buildpath, exist_ok = True)\n buildcmd = ('/bin/bash', '-c',\n 'source bin/activate; cd source && '\n ### note: adding --debug can be handy\n f'(pelican content --settings {settings_path} -o {buildpath})',\n )\n print(\"Building web site with:\", buildcmd)\n env = os.environ.copy()\n env['LIBCMARKDIR'] = LIBCMARKDIR\n subprocess.run(buildcmd, cwd=path, check=True, env=env)\n\n count = len(glob.glob(f'{buildpath}/**/*.html', recursive=True))\n print(f\"{count} html files.\")\n if args.count > 0 and args.count > count:\n print(\"Not enough html pages in the Web Site. 
Minimum %s > %s found in the Web Site.\" % (args.count, count))\n sys.exit(4)\n\n # Done for now\n print(\"Web site successfully generated!\")\n\n # It is much easier to do all the below, if we chdir()\n os.chdir(sourcepath)\n\n # Copy to result branch\n print(\"Copying web site to branch:\", args.outputbranch)\n\n try:\n subprocess.run((GIT, 'rev-parse', '--verify', \"origin/%s\" % args.outputbranch),\n check=True)\n print(\"- Doing fresh checkout of branch %s\" % args.outputbranch)\n subprocess.run((GIT, 'checkout', args.outputbranch, '-f'), check=True)\n subprocess.run((GIT, 'pull'), check=True)\n except:\n print(\"- Branch %s does not exist (yet), creating it...\" % args.outputbranch)\n # If .asf.yaml exists, which it should, make a copy of it in memory for later\n asfyml = os.path.join(sourcepath, '.asf.yaml')\n myyaml = None\n if os.path.exists(asfyml):\n myyaml = open(asfyml).read()\n subprocess.run((GIT, 'checkout', '--orphan', args.outputbranch), check=True)\n subprocess.run((GIT, 'rm', '-rf', '.'), check=True)\n # Add .asf.yaml back in if we found it.\n if myyaml:\n open(asfyml, \"w\").write(myyaml)\n subprocess.run((GIT, 'add', '.asf.yaml'), check=True)\n\n print(\"- Adding new content to branch\")\n # RM output dir if it already exists\n outputdir = os.path.join(sourcepath, 'output')\n if os.path.isdir(outputdir):\n print(\"Removing existing output dir %s\" % outputdir)\n shutil.rmtree(outputdir)\n shutil.move(buildpath, outputdir)\n subprocess.run((GIT, 'add', 'output/'), check=True)\n\n # Check if there are any changes.\n cp = subprocess.run((GIT, 'diff', '--cached', '--quiet'))\n if cp.returncode == 0:\n # There were no differences reported.\n print('Nothing new to commit. Ignoring this build.')\n else:\n print(\"- Committing to %s\" % args.source)\n subprocess.run((GIT, 'commit', '-m', 'Automatic Site Publish by Buildbot'), check=True)\n\n # If we're not in production, then avoid pushing changes.\n if IS_PRODUCTION:\n print('- Pushing changes, for publishing')\n subprocess.run((GIT, 'push', args.source, args.outputbranch), check=True)\n\n print('Success. 
Done.')\n # for dev/test provide viewing instructions\n if not IS_PRODUCTION:\n if args.listen:\n try:\n subprocess.run(('pelican','-l'), check=True)\n except KeyboardInterrupt:\n pass\n else:\n print(f'To test output:\\ncd {sourcepath}; pelican -l')", "def build(parameters):\n\n\n print(\"In Build module\")", "def start_build(self, build_id):\n pass", "def build(self, force: bool = False) -> BuildResult:\n raise NotImplementedError()", "def execute_build(\n self,\n tasks: List[ReleaseTask],\n bld_args: RepoBuildArgs,\n ) -> None:", "def actionBuild():\n\n #Init builder logger\n Builder.init()\n\n for target in Settings.targets:\n targetsToBuild, combineLibs, copyToOutput = Builder.getTargetGnPath(target)\n for platform in Settings.targetPlatforms:\n for cpu in Settings.targetCPUs:\n if System.checkIfCPUIsSupportedForPlatform(cpu,platform):\n for configuration in Settings.targetConfigurations:\n if not Summary.checkIfActionFailed(ACTION_PREPARE, target, platform, cpu, configuration):\n Logger.printStartActionMessage('Build ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration,ColoredFormatter.YELLOW)\n result = Builder.run(target, targetsToBuild, platform, cpu, configuration, combineLibs, copyToOutput)\n Summary.addSummary(ACTION_BUILD, target, platform, cpu, configuration, result, Builder.executionTime)\n if result != NO_ERROR:\n Logger.printEndActionMessage('Failed building ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration,ColoredFormatter.RED)\n #Terminate script execution if stopExecutionOnError is set to True in userdef\n shouldEndOnError(result)\n else:\n Logger.printEndActionMessage('Build ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration)\n else:\n Logger.printColorMessage('Build cannot run because preparation has failed for ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration,ColoredFormatter.YELLOW)\n Logger.printEndActionMessage('Build not run for ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration,ColoredFormatter.YELLOW)", "def pre_configure_project(source_dir, build_dir):\n ListenerManager.call(_project_pre_configure_manager, source_dir, build_dir)", "def subscribeToSuccessfulBuilds(target):", "def project_created_handler(event):\n obj = event.obj\n # submit Project after creation\n obj.workflow.start()", "def _doReleaseBuild(self, farbconfig):\n print \"Building all releases ...\"\n try:\n rbr = runner.ReleaseBuildRunner(farbconfig)\n rbr.run()\n print \"Release build completed.\"\n except runner.ReleaseBuildRunnerError, e:\n print >>sys.stderr, e\n sys.exit(1)", "def build():\n local('wintersmith build')", "def trigger(builder, revision, files=[], dry_run=False, extra_properties=None):\n repo_name = query_repo_name_from_buildername(builder)\n return buildapi.trigger_arbitrary_job(repo_name, builder, revision, files, dry_run,\n extra_properties)", "def grunt_build():\n local('cd {{ project_name }} && grunt build')", "def rebuild(options, project_directory=None):\n if options.help:\n print rebuild.__doc__\n sys.exit(1)\n\n if not project_directory:\n project_directory = os.getcwd()\n action_rebuild(project_directory)", "def test_build(self):\n self.app.build()", "def build(ctx):\n generate_build_files(ctx)\n compile_build_files(ctx)\n _package_plugins(ctx)", "def main(type, project, author, email):\n while os.path.exists(project):\n click.echo('The project has been exists. 
Would you want to rebuild the project?\\n')\n click.echo('> {:<12}\\tfor\\tcontinue'.format('YES'))\n click.echo('> {:<12}\\tfor\\tbreak'.format('NO'))\n click.echo('> {:<12}\\tfor\\tbuilding another project\\n'.format('PROJECT NAME'))\n confirm_info = input('> ').strip().lower()\n if confirm_info == 'yes':\n shutil.rmtree(project)\n elif confirm_info == 'no':\n return\n else:\n project = confirm_info\n my_project = CreateNewProject(type, project, author, email)\n my_project.run()", "def post_build_target(target_data, toolchain):\n ListenerManager.call(_target_post_build_manager, target_data, toolchain)", "def build(self, conanfile):\n app = ConanApp(self._conan_api.cache_folder)\n conanfile.folders.set_base_package(conanfile.folders.base_build)\n conanfile.folders.set_base_pkg_metadata(os.path.join(conanfile.build_folder, \"metadata\"))\n run_build_method(conanfile, app.hook_manager)", "def main():\n parser = argparse.ArgumentParser(\n description = \"Find latest successful build for given product/version\"\n )\n parser.add_argument('--product', default=\"couchbase-server\",\n help=\"Product name\")\n parser.add_argument('--version', required=True, help=\"Version number\")\n\n args = parser.parse_args()\n\n trigger = SanityTrigger(args.product, args.version)\n\n last_bld = trigger.get_last_sanity()\n bld_num = trigger.get_latest_build()\n if bld_num > last_bld:\n print (\"Writing \" + TRIGGER_PROPERTIES_FILENAME)\n trigger.set_last_sanity(bld_num)\n trigger.write_properties(TRIGGER_PROPERTIES_FILENAME)\n else:\n print (\"Nothing to do; not writing \" + TRIGGER_PROPERTIES_FILENAME)\n if (os.path.exists(TRIGGER_PROPERTIES_FILENAME)):\n os.unlink(TRIGGER_PROPERTIES_FILENAME)", "def build():\n click.echo(\"start build your pelican project...\")\n # copy_mathjax(PUBLISHDIR)\n \n cmd = \"pelican {INPUTDIR} -o {PUBLISHDIR} -s {PUBLISHCONF}\".format(\n INPUTDIR=INPUTDIR,\n PUBLISHCONF=PUBLISHCONF,\n PUBLISHDIR=PUBLISHDIR\n )\n\n click.echo('start run cmd: {0}'.format(cmd))\n ret = subprocess.call(cmd, shell=True)\n\n click.echo('running result is:{0}'.format(ret))", "def test_build_manual_run(self):\n repository = self._create_repository()\n review_request = self.create_review_request(repository=repository)\n diffset = self.create_diffset(review_request=review_request)\n diffset.base_commit_id = '8fd69d70f07b57c21ad8733c1c04ae604d21493f'\n diffset.save()\n\n config = self._create_config()\n self.integration.enable_integration()\n\n status_update = \\\n self.create_status_update(service_id='travis-ci',\n review_request=review_request)\n\n data = self._spy_on_make_request()\n\n status_update_request_run.send(sender=self.__class__,\n status_update=status_update)\n\n self.assertTrue(TravisAPI._make_request.called)\n\n self.assertEqual(\n data['url'],\n 'https://api.travis-ci.org/repo/'\n 'mypublicorg%2Fmypublicorgrepo/requests')\n\n self.assertEqual(\n data['request']['config']['env']['global'],\n [\n 'REVIEWBOARD_STATUS_UPDATE_ID=1',\n 'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d' % config.pk,\n ])\n\n self.assertEqual(data['request']['message'],\n 'Test Summary\\n\\nTest Description')\n self.assertTrue('git fetch --unshallow origin || true'\n in data['request']['config']['before_install'])\n self.assertTrue('git checkout %s' % diffset.base_commit_id\n in data['request']['config']['before_install'])\n self.assertEqual(data['request']['branch'], 'review-requests')", "def pre_build_hook(self):", "def post_process(self, **kwargs):\n self.create_ignore()\n click.echo('Create project {} successfully. 
Enjoy yourself!'.format(self.app_dir))", "def build (self, event = None):\r\n dialog = wx.FileDialog(self, 'Build Story', os.getcwd(), \"\", \\\r\n \"Web Page (*.html)|*.html\", \\\r\n wx.SAVE | wx.FD_OVERWRITE_PROMPT | wx.FD_CHANGE_DIR)\r\n \r\n if dialog.ShowModal() == wx.ID_OK:\r\n self.buildDestination = dialog.GetPath()\r\n self.rebuild(None, True)\r\n \r\n dialog.Destroy()", "def post_build_hook(self):", "def build():\n gulp('build')", "def run():\n build_no_documentation()\n build_sphinx_build()\n #build_sphinx_pdf()\n build_graphviz_files()", "def script_build(repo_root, spec, build_args, verbose):\n env = os.environ\n env.update(build_args)\n for s in spec.build_script():\n if verbose:\n click.echo(str(s))\n ret = subprocess.call(str(s), shell=True, cwd=repo_root, env=env)\n if ret:\n raise click.ClickException(\"{} exited with code {}\".format(str(s), ret))", "def trigger_cb(file_name: str, commit_hash: str, project_name: str) -> None:\n\n cb_client = boto3.client(\"codebuild\")\n build = {\n \"projectName\": project_name,\n \"sourceVersion\": commit_hash,\n \"environmentVariablesOverride\": [\n {\"name\": \"REQ_FILENAME\", \"value\": file_name, \"type\": \"PLAINTEXT\"}\n ],\n }\n cb_client.start_build(**build)", "def build(target_dir):\n prepare_demo_site(target_dir)\n\n patch_config(\n target_dir, (\"# CREATE_FULL_ARCHIVES = False\", \"CREATE_FULL_ARCHIVES = True\")\n )\n\n with cd(target_dir):\n __main__.main([\"build\"])", "def pkg_build(self):\n msg = self.msg\n msg.info('building [%s-%s]... (%s)',\n self.pkg_name,\n self.pkg_ver,\n self.env['CMTCONFIG'])\n msg.debug('build-dir: [%s]', self.pkg_build_dir)\n msg.debug('install: [%s]', self.pkg_install_dir)\n \n _build_done = osp.join(osp.dirname(self.pkg_build_dir),\n 'pkg-build-%(pkg_name)s.done' % self.env)\n\n if not osp.exists(_build_done):\n\n import time\n start = time.asctime()\n self.pre_build()\n self.build()\n self.post_build()\n end = time.asctime()\n\n with open(_build_done, 'w') as f:\n f.write('start: %s\\n' % start)\n f.write('done: %s\\n' % end)\n f.flush()\n pass\n\n msg.debug('building [%s-%s]... 
(%s) [done]',\n self.pkg_name,\n self.pkg_ver,\n self.env['CMTCONFIG'])\n return 0", "def build_and_deploy():\n\n with shell_env(TZ=_get_timezone()):\n _create_output_branch()\n _build_html()\n _git_commit_all()\n _git_push(_get_output_branch())", "def pull_build_project(project, branch=\"master\"):\n repo_result, pull_log = pull(project.path, app.config[\"SSH_PRIVATE\"], branch)\n if repo_result:\n newjob = create_job(project, pull_log)\n settings = abcparse(os.path.join(project.path, \"build.abc\"))\n build(newjob, project, settings[\"windows\"]) #TODO: Handle configs", "def trigger_build(\n commit, source_version, lambda_name, buildspec_file_name):\n buildspec_override = \\\n _get_buildspec_override(\n lambda_name, buildspec_file_name)\n # return codebuild response for debug\n return codebuild_client.start_build(\n projectName=os.environ['PROJECT'],\n sourceVersion=source_version,\n environmentVariablesOverride=[\n {\n 'name': 'LAMBDA',\n 'value': lambda_name,\n },\n # WARNING: this is rather ugly hack\n # since now we are getting code from s3\n # we lose information about initail commit\n {\n 'name': 'GITHUB_COMMIT',\n 'value': commit\n }\n ],\n buildspecOverride=buildspec_override)", "def test_quick_build1(self):\n pass", "def _doPackageBuild(self, farbconfig):\n print \"Building all packages ...\"\n try:\n pbr = runner.PackageBuildRunner(farbconfig)\n pbr.run()\n print \"Package build completed.\"\n except runner.PackageBuildRunnerError, e:\n print >>sys.stderr, e\n sys.exit(1)", "def build(c, force=None):\n for sp_ns in ns_foreach_task_subdir(c):\n print(\"-- running build in \", os.getcwd())\n\n # sp_ns.tasks.build(c, force)\n c.run('invoke build')", "def build(self, build):\n\n self._build = build", "def schedule_build(self, timeout=None):\n\n if timeout is None:\n timeout = self._config['build_timeout']\n\n logging.debug('Scheduling a build in %ss', timeout)\n self._event = self._scheduler.schedule(\n self._event, timeout, self.build)", "def test_quick_build(self):\n pass", "def handle(self, *args, **options):\n build([\n # StoreBuilds(datetime.datetime.now().strftime('%b_%d_%y_%H_%M'))\n StoreBuilds('May_10_21_14_19')\n ], local_scheduler=True)", "def main():\n log = get_logger(\"bakery.bake.main\")\n config = configure()\n\n bake_instructions = PREAMBLE + open(config.bakefile).read()\n\n try:\n exec(bake_instructions, globals())\n log.info(fg.bright.green(\"BUILD SUCCEEDED\"))\n except Exception as e:\n build.critical = True\n log.error(str(e))\n if DEBUG:\n log.exception(\"Exception details follow.\")\n log.info(fg.bright.red(\"BUILD FAILED\"))\n\n if build.build_count == 0 and not build.critical:\n log.warn(\n ansilog.Node.list(\n \"Nothing was built, did you forget to decorate your module with \",\n fg.bright.yellow(\"@build\"),\n \"?\",\n )\n )\n\n sys.exit(0)", "def build(ctx, arch, type, build_num, verbose, goal, extra_args_str):\n # Run the supplied build script if there is one, otherwise assume cmake\n # Parse file to find requirements then check that they exist, then build\n project_config = ctx.obj.project_config()\n component = ComponentConfiguration(project_config['components'][0])\n spec = component.get_spec(goal, arch, type)\n build_args = {}\n extra_args = parse_key_value_pairs(extra_args_str)\n build_args.update(spec.build_vars())\n build_args.update(extra_args)\n add_version_args(ctx.obj.repo_root, build_num, build_args)\n if spec.build_script() is None:\n cmake_build(ctx.obj.repo_root, spec.build_arch(), spec.build_type(), spec.build_goal(), verbose, 
build_args)\n else:\n script_build(ctx.obj.repo_root, spec, build_args, verbose)\n try:\n ctx.obj.build_server().publish_artifacts(spec.build_artifacts())\n except click.ClickException:\n pass", "def trigger_deploy_build(\n commit, source_version, lambda_name):\n return trigger_build(\n commit, source_version, lambda_name, 'buildspec-build')", "def pre_build_target(target_data, toolchain):\n ListenerManager.call(_target_pre_build_manager, target_data, toolchain)", "def do_build(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tcfg = self.cfg\n\t\tself.log('PHASE: build, repository work', level=logging.DEBUG)\n\t\tmodule_id_list = self.module_ids()\n\t\tif self.build['deps_only']:\n\t\t\tmodule_id_list_build_only = filter(lambda x: cfg[x]['shutit.core.module.build'], module_id_list)\n\t\tfor module_id in module_id_list:\n\t\t\tmodule = self.shutit_map[module_id]\n\t\t\tself.log('Considering whether to build: ' + module.module_id, level=logging.INFO)\n\t\t\tif cfg[module.module_id]['shutit.core.module.build']:\n\t\t\t\tif self.build['delivery'] not in module.ok_delivery_methods:\n\t\t\t\t\tself.fail('Module: ' + module.module_id + ' can only be built with one of these --delivery methods: ' + str(module.ok_delivery_methods) + '\\nSee shutit build -h for more info, or try adding: --delivery <method> to your shutit invocation') # pragma: no cover\n\t\t\t\tif self.is_installed(module):\n\t\t\t\t\tself.build['report'] = (self.build['report'] + '\\nBuilt already: ' + module.module_id + ' with run order: ' + str(module.run_order))\n\t\t\t\telse:\n\t\t\t\t\t# We move to the module directory to perform the build, returning immediately afterwards.\n\t\t\t\t\tif self.build['deps_only'] and module_id == module_id_list_build_only[-1]:\n\t\t\t\t\t\t# If this is the last module, and we are only building deps, stop here.\n\t\t\t\t\t\tself.build['report'] = (self.build['report'] + '\\nSkipping: ' + module.module_id + ' with run order: ' + str(module.run_order) + '\\n\\tas this is the final module and we are building dependencies only')\n\t\t\t\t\telse:\n\t\t\t\t\t\trevert_dir = os.getcwd()\n\t\t\t\t\t\tself.get_current_shutit_pexpect_session_environment().module_root_dir = os.path.dirname(self.shutit_file_map[module_id])\n\t\t\t\t\t\tself.chdir(self.get_current_shutit_pexpect_session_environment().module_root_dir)\n\t\t\t\t\t\tself.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)\n\t\t\t\t\t\tself.build_module(module)\n\t\t\t\t\t\tself.logout(echo=False)\n\t\t\t\t\t\tself.chdir(revert_dir)\n\t\t\tif self.is_installed(module):\n\t\t\t\tself.log('Starting module',level=logging.DEBUG)\n\t\t\t\tif not module.start(self):\n\t\t\t\t\tself.fail(module.module_id + ' failed on start', shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover", "def build(ctx, vstudio_root=None, arch=\"x64\", major_version='7', debug=False):\n\n if sys.platform != 'win32':\n print(\"Custom action library is only for Win32\")\n raise Exit(code=1)\n\n package_version = get_version(ctx, url_safe=True, major_version=major_version)\n ver = get_version_numeric_only(ctx, major_version=major_version)\n build_maj, build_min, build_patch = ver.split(\".\")\n verprops = f\" /p:MAJ_VER={build_maj} /p:MIN_VER={build_min} /p:PATCH_VER={build_patch} \"\n print(f\"arch is {arch}\")\n cmd = \"\"\n configuration = \"Release\"\n if debug:\n configuration = \"Debug\"\n\n if not os.getenv(\"VCINSTALLDIR\"):\n print(\"VC Not 
installed in environment; checking other locations\")\n\n vsroot = vstudio_root or os.getenv('VSTUDIO_ROOT')\n if not vsroot:\n print(\"Must have visual studio installed\")\n raise Exit(code=2)\n batchfile = \"vcvars64.bat\"\n if arch == \"x86\":\n batchfile = \"vcvars32.bat\"\n vs_env_bat = f'{vsroot}\\\\VC\\\\Auxiliary\\\\Build\\\\{batchfile}'\n cmd = f'call \"{vs_env_bat}\" && msbuild {CUSTOM_ACTION_ROOT_DIR}\\\\cal /p:Configuration={configuration} /p:Platform={arch}'\n else:\n cmd = f'msbuild {CUSTOM_ACTION_ROOT_DIR}\\\\cal /p:Configuration={configuration} /p:Platform={arch}'\n\n cmd += verprops\n print(f\"Build Command: {cmd}\")\n\n # Try to run the command 3 times to alleviate transient\n # network failures\n succeeded = try_run(ctx, cmd, 3)\n if not succeeded:\n raise Exit(\"Failed to build the customaction.\", code=1)\n\n artefacts = [\n {\"source\": \"customaction.dll\", \"target\": \"customaction.dll\"},\n {\"source\": \"customaction.pdb\", \"target\": f\"customaction-{package_version}.pdb\"},\n {\"source\": \"customaction-tests.exe\", \"target\": \"customaction-tests.exe\"},\n ]\n for artefact in artefacts:\n shutil.copy2(\n f\"{CUSTOM_ACTION_ROOT_DIR}\\\\cal\\\\{arch}\\\\{configuration}\\\\{artefact['source']}\",\n BIN_PATH + f\"\\\\{artefact['target']}\",\n )", "def compile_project(project_name, project_revision, template, working_dir):\n tcl_file = \"compile_project.tcl\"\n with open(working_dir + tcl_file, \"w\") as compile_file:\n compile_file.write(template.add_quartus_compile_project(\n project_name, project_revision))\n\n log_msg = \"Compiling project\"\n cmd = f\"cd {working_dir} && {QUARTUS_BIN_DIR}quartus_sh -t {tcl_file}\"\n log_file_path = working_dir + \"compile_project.log\"\n\n run_cmd_and_log(cmd, log_msg, log_file_path)", "def task_build(argv):\n pytaskmaster.generator(\"setup.py.in\", \"setup.py\", config)\n pytaskmaster.generator(\"pytaskmaster/version.py.in\", \"pytaskmaster/version.py\", config)\n shell(\"python setup.py bdist_wheel\")\n if \"--sign\" in argv:\n for file in os.listdir(\"dist\"):\n asc_file = \"dist/\" + file + \".asc\"\n if file.endswith(\".whl\") and not os.path.isfile(asc_file):\n shell(\"gpg --detach-sign -a dist/{}\".format(file))", "def build(ctx: typer.Context):\n from .tasks import build, main\n\n sys.argv = sys.argv[:1] + (ctx.args or [\"list\"])\n main(vars(build))", "def start(context, project_name):\n\n gcc_version = '10-2020-q4-major-'\n os_extension = ''\n\n if platform.system() == 'Linux':\n if platform.machine() == 'x86_64':\n os_extension = 'x86_64-linux'\n else:\n os_extension = 'aarch64-linux'\n elif platform.system() == 'Darwin':\n os_extension = 'mac'\n elif platform.system() == 'Windows':\n os_extension = 'win32'\n\n final_branch_name = f'{gcc_version}{os_extension}'\n\n if not os_extension:\n click.secho(f'This system {platform.system()}:{platform.machine()} ' +\n 'is not supported for SJSU-Dev2 ', fg='red', bold=True)\n return -1\n\n click.secho(f'Creating project: {project_name}', fg='white', bold=True)\n Path(project_name).mkdir()\n\n click.echo(f' Creating \"{project_name}/.sj2\" directory')\n Path(f'{project_name}/.sj2').mkdir(exist_ok=True)\n Path(f'{project_name}/.sj2/reserved').touch(exist_ok=True)\n\n click.echo(f' Creating \"{project_name}/library\" directory')\n Path(f'{project_name}/library').mkdir(exist_ok=True)\n\n click.echo(f' Creating \"{project_name}/packages\" directory')\n Path(f'{project_name}/packages').mkdir(exist_ok=True)\n\n click.echo(f' Creating \"{project_name}/main.cpp\" source file')\n 
Path(f'{project_name}/main.cpp').write_text(BASIC_MAIN_CPP)\n\n click.echo('')\n\n context.invoke(install, library='libcore', tag='main',\n project_directory=project_name)\n context.invoke(install, library='libarmcortex',\n tag='main', project_directory=project_name)\n context.invoke(install, library='liblpc40xx', tag='main',\n project_directory=project_name)\n context.invoke(install, library='libstm32f10x',\n tag='main', project_directory=project_name)\n context.invoke(install, library='gcc-arm-none-eabi-picolibc',\n tag=final_branch_name, project_directory=project_name)", "def viewBuild (self, event = None):\r\n path = 'file://' + urllib.pathname2url(self.buildDestination)\r\n path = path.replace('file://///', 'file:///')\r\n wx.LaunchDefaultBrowser(path)", "def Run(self, *args, **kwargs):\n return utils.RunCmd(self.BuildCmd(*args, **kwargs))", "def buildStarted(builderName, build):", "def make(self):\n self.enterBuildDir()\n return utils.system(Arguments([self.makeProgram, self.makeOptions(self.subinfo.options.make.args)]))", "def setup_schedule():\n for project in Project.select():\n if (project.schedule_interval is not None) and (project.schedule_interval > 0):\n schedule.add_job(pull_build_project, \"interval\", id=\"building_\" + str(project.id),\n hours=project.schedule_interval,\n args=[project, \"master\"])", "def build_all(self):\n self.android_build()\n self.generate_patch_build('')\n self.generate_specs_build()\n self.generate_interfaces()", "def run_build(self, dist: str, skip_tests: bool = False) -> None:\n\n if self._failed:\n print(\"not building %s due to earlier failure\" % (dist,))\n raise Exception(\"failed\")\n\n try:\n self._inner_build(dist, skip_tests)\n except Exception as e:\n print(\"build of %s failed: %s\" % (dist, e), file=sys.stderr)\n self._failed = True\n raise", "def project():", "def project():", "def project():", "def pre_build(self):", "def RemoteBuild(self, image):\n raise NotImplementedError()", "def build(self):\n logging.info(\"Building %s\", self.path)\n\n data = self.render()\n\n # Make sure a folder for the output path exists\n try: os.makedirs(os.path.dirname(self.paths['full-build']))\n except OSError: pass\n\n # Write the data to the output file\n f = codecs.open(self.paths['full-build'], 'w', 'utf-8')\n f.write(data)\n f.close()\n\n # Run all plugins\n self.site.pluginMethod('postBuildPage', self.site, self.paths['full-build'])", "def build(self):\n env = ConfigureEnvironment(self.deps_cpp_info, self.settings)\n\n set_path_command = \"\"\n # Download nasm as build tool. This should go to source()\n if self.options.SSE == True:\n if self.settings.os == \"Linux\":\n # TODO: We should build nasm from source then.\n self.options.SSE = False # Or is removing here better? 
I'm not familiar with python..\n else:\n nasm_version = \"2.12.02\"\n nasm_os_url_id = \"\" #nasm url identifier\n if self.settings.os == \"Windows\":\n if self.settings.arch == \"x86\":\n nasm_os_url_id = \"win32\"\n else:\n nasm_os_url_id = \"win64\" \n elif self.settings.os == \"Macos\":\n nasm_os_url_id = \"macosx\"\n nasm_folder_name = \"nasm-%s-%s\" % (nasm_version, nasm_os_url_id)\n nasm_zip_name = \"%s.zip\" % nasm_folder_name\n download(\"http://www.nasm.us/pub/nasm/releasebuilds/%s/%s/%s\" % (nasm_version, nasm_os_url_id, nasm_zip_name), nasm_zip_name)\n self.output.warn(\"Downloading nasm: http://www.nasm.us/pub/nasm/releasebuilds/%s/%s/%s\" % (nasm_version, nasm_os_url_id, nasm_zip_name))\n unzip(nasm_zip_name)\n os.unlink(nasm_zip_name)\n nasm_path = os.path.join(os.getcwd(), nasm_folder_name)\n\n #env.environ[\"PATH\"] += os.pathsep + nasm_path #its probably as easy as this, but i cant append to the path self.run operates in.\n if self.settings.os == \"Windows\":\n set_path_command = \"set \\\"PATH=%s\\\" &&\" % os.environ[\"PATH\"]\n else:\n set_path_command = \"PATH=\\\"%s\\\" &&\" % os.environ[\"PATH\"]\n\n if self.settings.os == \"Linux\" or self.settings.os == \"Macos\":\n if self.options.fPIC:\n env_line = env.command_line.replace('CFLAGS=\"', 'CFLAGS=\"-fPIC ')\n else:\n env_line = env.command_line\n self.run(\"cd %s && autoreconf -fiv\" % self.ZIP_FOLDER_NAME)\n config_options = \"\"\n if self.settings.arch == \"x86\":\n if self.settings.os == \"Linux\":\n config_options = \"--host i686-pc-linux-gnu CFLAGS='-O3 -m32' LDFLAGS=-m32\"\n else:\n config_options = \"--host i686-apple-darwin CFLAGS='-O3 -m32' LDFLAGS=-m32\"\n\n if self.settings.os == \"Macos\":\n old_str = '-install_name \\$rpath/\\$soname'\n new_str = '-install_name \\$soname'\n replace_in_file(\"./%s/configure\" % self.ZIP_FOLDER_NAME, old_str, new_str)\n\n self.run(\"cd %s && %s ./configure %s\" % (self.ZIP_FOLDER_NAME, env_line, config_options))\n self.run(\"cd %s && %s make\" % (self.ZIP_FOLDER_NAME, env_line))\n else: # We should (for simplicity) always use cmake shouldnt we?\n conan_magic_lines = '''project(libjpeg-turbo)\n cmake_minimum_required(VERSION 3.0)\n include(../conanbuildinfo.cmake)\n CONAN_BASIC_SETUP()\n '''\n replace_in_file(\"%s/CMakeLists.txt\" % self.ZIP_FOLDER_NAME, \"cmake_minimum_required(VERSION 2.8.8)\", conan_magic_lines)\n replace_in_file(\"%s/CMakeLists.txt\" % self.ZIP_FOLDER_NAME, \"project(libjpeg-turbo C)\", \"\")\n \n cmake = CMake(self.settings)\n builddir = os.path.join(self.ZIP_FOLDER_NAME, \"_build\")\n\n if os.path.exists(builddir):\n shutil.rmtree(builddir) # We need to remove this folder first for windows\n os.makedirs(builddir)\n\n cmake_options = []\n if self.options.shared == True:\n cmake_options += [\"-DENABLE_STATIC=0\"]\n else:\n cmake_options = [\"-DENABLE_SHARED=0\"]\n cmake_options += [\"-DWITH_SIMD=%s\" % \"1\" if self.options.SSE else \"0\"]\n\n # why this comment: \"Don't change runtime, conan will take care of\"? conan_basic_setup() runs before this cmake option replaces MT with MD again\n cmake_options += [\"-DWITH_CRT_DLL=%s\" % \"1\" if self.settings.compiler.runtime == \"MD\" or self.settings.compiler.runtime == \"MDd\" else \"0\"]\n\n self.run('%s cd %s && cmake .. %s %s' % (set_path_command, builddir, cmake.command_line, \" \".join(cmake_options)))\n self.run(\"%s cd %s && cmake --build . 
%s\" % (set_path_command, builddir, cmake.build_config))", "def finished():\n print >>sys.stderr, 'build completed successfully'\n reactor.callFromThread(reactor.stop)", "def gen_project(project_name, project_revision, target, template, working_dir):\n gen_project_tcl(project_name, project_revision,\n target, template, working_dir)\n qsys_files = filter(lambda file: file.endswith(\".qsys\"), target.files_list)\n for file in qsys_files:\n gen_qsys_system_from_qsys_file(file, working_dir)\n\n log_msg = \"Generating project\"\n cmd = f\"cd {working_dir} && {QUARTUS_BIN_DIR}quartus_sh -t make_project.tcl\"\n log_file_path = working_dir + \"project_gen.log\"\n\n run_cmd_and_log(cmd, log_msg, log_file_path)", "def build(cfg, jobs, watch):\n libjobs.buildJobs(cfg, jobs, watch)", "def post_receive(repository, config, **_):\n confp = _get_config(repository, config)\n\n work_dir = Path(confp.get('repository', 'work_dir'))\n if not work_dir.is_absolute():\n work_dir = (repository / confp.get('repository', 'work_dir'))\n else:\n work_dir = work_dir.expanduser()\n\n work_dir = work_dir.resolve()\n\n # Checkout the files in the work dir\n if not work_dir.is_dir():\n work_dir.mkdir()\n _log(repository, 'INFO', \"Created work dir\")\n print(\"!! Created work dir %s\" % str(work_dir))\n\n sp.run(('git',\n '--work-tree=' + str(work_dir),\n '--git-dir=' + str(repository),\n 'checkout', '-q', '-f', str(confp.get('general', 'branch'))))\n\n # Copy new buildfile\n print(\"Checking for buildfile updat\")\n buildfile_update = (work_dir / '.cricicbuild')\n if buildfile_update.is_file():\n print(\"Updating buildfile\")\n buildfile = Path(repository) / confp.get('general', 'buildfile')\n buildfile_old = buildfile.read_bytes()\n buildfile.write_bytes(buildfile_update.read_bytes())\n\n # Quick dry-run to prevent writing bad buildfile\n valid = _make_targets(repository, confp, 'pre', dry_run=True)\n if not valid:\n _log(repository, 'INFO', \"Buildfile test failed\")\n print(\"!! 
Buildfile test failed\")\n buildfile.write_bytes(buildfile_old)\n sys.exit(1)\n\n # Remove buildfile from production\n buildfile_update.unlink()\n\n success = _make_targets(repository, confp, 'post', cwd=work_dir)\n if not success:\n _log(repository, 'ERROR', \"Post-receive failed to complete\")\n print(\"-- Cancelling further steps\")\n sys.exit(1)\n\n _log(repository, 'INFO', \"Build succesful\")", "def Rebuild(self, targets, arguments):\n self.Clean(targets, [])\n self.Build(targets, arguments)", "def build(self, force=False):\n if self.built and not force:\n return\n\n try:\n self.build_pd()\n self.build_warning()\n #self.build_propagation()\n self.build_cableconfig()\n self.built = True\n except Exception as e:\n print('Circuit build failed: ', e, ' Circuit number: ', self.circuitnr)", "def _run_ci_publish():\n _run_install(False)\n _run_tests(False)\n _run_publish(True)", "def do(args):\n worktree = qisys.parsers.get_worktree(args)\n\n project_name = args.project_name\n project_path = os.path.join(os.getcwd(), project_name)\n\n if os.path.exists(project_path):\n raise Exception(\"%s already exists\" % project_path)\n os.mkdir(project_path)\n copy_helper(project_name, project_path)\n\n if args.git:\n qisys.command.call([\"git\", \"init\"], cwd=project_path)\n with open(os.path.join(project_path, \".gitignore\"), \"w\") as fp:\n fp.write(\"build-*\\n\")\n qisys.command.call([\"git\" , \"add\" , \".\"], cwd=project_path)\n qisys.command.call([\"git\" , \"commit\" , \"-m\" , \"initial commit\"], cwd=project_path)\n\n ui.info(ui.green, \"New project initialized in\", ui.bold, project_path)\n worktree.add_project(project_path)\n return worktree.get_project(project_path)", "def test_main(self):\n self.createFakeSphinxProject()\n self.builder.main([self.sphinxDir.parent().path])\n self.verifyBuilt()", "def pre_build(self):\n pass", "def test_build_target(self, mock_run):\n self.args.cmake_source_project_root = '/tmp/falken_src'\n self.args.cmake_build_dir = '/tmp/build_folder'\n self.args.number_of_threads = 7\n\n runner = cmake_runner.CMakeRunner(self.installer.binary_dir,\n self.args.cmake_source_project_root,\n self.args.cmake_build_dir)\n\n build_cmake_project.build_target(runner, self.args, 'Debug')\n\n # Call cmake\n mock_run.assert_called_once_with(\n args='cmake --build /tmp/build_folder --verbose -j 7',\n check=True,\n shell=True)", "def main():\r\n\r\n # Init build folder\r\n\r\n print(\"Checking 'build' directory...\")\r\n\r\n if not os.path.isdir(BUILD_DIR_PATH):\r\n os.mkdir(BUILD_DIR_PATH)\r\n\r\n # Build resources\r\n\r\n print(\"Checking resources directory...\")\r\n\r\n if os.path.isdir(RESOURCES_DIR_PATH):\r\n print(\"Resources directory exists. 
Performing cleanup.\")\r\n shutil.rmtree(RESOURCES_DIR_PATH, ignore_errors=True)\r\n\r\n print(\"Building resources...\")\r\n\r\n shutil.copytree(\"resources/images\", IMAGES_DIR_PATH)\r\n\r\n # Build static page\r\n\r\n print(\"Building static page...\")\r\n\r\n os.chdir(BUILD_DIR_PATH)\r\n lib.fresh_tomatoes.open_movies_page(data.entertainment_center.MOVIES)\r\n\r\n print(\"Build complete!\")", "def move_files_into_build():\n build_root = os.path.join(template_path, 'build')\n create_python_package(build_root)\n\n build_buildbot = os.path.join(template_path, 'build', 'buildbot')\n create_python_package(build_buildbot)\n\n pythonify('runtests', [], ['build']) \n pythonify('master.cfg', ['buildbot'], ['build', 'buildbot'])", "def test_failToBuild(self):\n # note no fake sphinx project is created\n self.assertRaises(CalledProcessError, self.builder.build, self.sphinxDir)", "def build(self, gyp_file, target=None, **kw):\n raise NotImplementedError", "def trigger_test_build(\n commit, source_version, lambda_name):\n return trigger_build(\n commit, source_version, lambda_name, 'buildspec-test')", "def _sphinx_build(self, kind: str):\n if kind not in (\"html\", \"latex\"):\n raise ValueError(f\"kind must be html or latex, not {kind}\")\n\n cmd = [\"sphinx-build\", \"-b\", kind]\n if self.num_jobs:\n cmd += [\"-j\", self.num_jobs]\n if self.warnings_are_errors:\n cmd += [\"-W\", \"--keep-going\"]\n if self.verbosity:\n cmd.append(f\"-{'v' * self.verbosity}\")\n cmd += [\n \"-d\",\n os.path.join(BUILD_PATH, \"doctrees\"),\n SOURCE_PATH,\n os.path.join(BUILD_PATH, kind),\n ]\n return subprocess.call(cmd)", "def _sphinx_build(self, kind: str):\n if kind not in (\"html\", \"latex\"):\n raise ValueError(f\"kind must be html or latex, not {kind}\")\n\n cmd = [\"sphinx-build\", \"-b\", kind]\n if self.num_jobs:\n cmd += [\"-j\", self.num_jobs]\n if self.warnings_are_errors:\n cmd += [\"-W\", \"--keep-going\"]\n if self.verbosity:\n cmd.append(f\"-{'v' * self.verbosity}\")\n cmd += [\n \"-d\",\n os.path.join(BUILD_PATH, \"doctrees\"),\n SOURCE_PATH,\n os.path.join(BUILD_PATH, kind),\n ]\n return subprocess.call(cmd)", "def build():" ]
[ "0.69045925", "0.6808615", "0.6769827", "0.6756172", "0.65721595", "0.6517098", "0.6494844", "0.6461456", "0.6375029", "0.6318517", "0.6312991", "0.6210087", "0.6203009", "0.6105097", "0.60498416", "0.59993887", "0.5988052", "0.5903205", "0.5868917", "0.5858315", "0.5857097", "0.5855001", "0.5835281", "0.5833631", "0.581226", "0.57830405", "0.57789946", "0.57670295", "0.57538867", "0.57512367", "0.5706214", "0.5699421", "0.5692406", "0.5689196", "0.567284", "0.5670383", "0.565508", "0.5650733", "0.5630654", "0.56223506", "0.5619308", "0.5602507", "0.5595549", "0.55828625", "0.5579189", "0.55741173", "0.55582726", "0.5553709", "0.55312604", "0.5523597", "0.55092084", "0.5508958", "0.55021125", "0.5492776", "0.54913795", "0.5487712", "0.545974", "0.540817", "0.5399095", "0.53981626", "0.53945076", "0.53910667", "0.5387737", "0.53860545", "0.5381883", "0.5367434", "0.5364311", "0.5359337", "0.53552264", "0.5354941", "0.53511745", "0.5350461", "0.5349641", "0.5346302", "0.5337047", "0.5337047", "0.5337047", "0.53250915", "0.5308077", "0.5307347", "0.5304057", "0.5300693", "0.52950335", "0.528889", "0.5284271", "0.5277948", "0.527539", "0.52712166", "0.52705586", "0.5269156", "0.5268557", "0.52633446", "0.52545846", "0.52525365", "0.5241809", "0.52320856", "0.52297914", "0.52221835", "0.52221835", "0.52215743" ]
0.690302
1
Gets a specific number of builds from the project.
async def get_builds(self, *, quantity=10):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getBuild(number):", "def getBuild(number):", "def getBuilds():", "def get_first_n_built_chunk_ids(self, number):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT chunk_id FROM index_builder WHERE ib_task = 'built' ORDER BY index LIMIT %s;\", (number,))\n results = cur.fetchall()\n cur.close()\n return results\n except Exception as e:\n print(e)", "def Builds():\n return builds", "def getPendingBuilds():", "def getPendingBuilds():", "def getBuild():", "def num_projects(self):\n return self._num_projects", "def concurrent(self, project):\n limit_reached = False\n query = Q(\n project=project,\n )\n\n if project.main_language_project:\n # Project is a translation, counts all builds of all the translations\n query |= Q(project__main_language_project=project.main_language_project)\n query |= Q(project__slug=project.main_language_project.slug)\n\n elif project.translations.exists():\n # The project has translations, counts their builds as well\n query |= Q(project__in=project.translations.all())\n\n # If the project belongs to an organization, count all the projects\n # from this organization as well\n organization = project.organizations.first()\n if organization:\n query |= Q(project__in=organization.projects.all())\n\n # Limit builds to 5 hours ago to speed up the query\n query &= Q(date__gt=timezone.now() - datetime.timedelta(hours=5))\n\n concurrent = (\n (\n self.filter(query).exclude(\n state__in=[\n BUILD_STATE_TRIGGERED,\n BUILD_STATE_FINISHED,\n BUILD_STATE_CANCELLED,\n ]\n )\n )\n .distinct()\n .count()\n )\n\n max_concurrent = Project.objects.max_concurrent_builds(project)\n log.info(\n \"Concurrent builds.\",\n project_slug=project.slug,\n concurrent=concurrent,\n max_concurrent=max_concurrent,\n )\n if concurrent >= max_concurrent:\n limit_reached = True\n return (limit_reached, concurrent, max_concurrent)", "def builds(self):\n return self._builds", "async def builds(self, *args, **kwargs):\n\n return await self._makeApiCall(self.funcinfo[\"builds\"], *args, **kwargs)", "def test_get_build_number(self):\n pass", "def get_latest_build(self):\n # Retrieve last sanity-checked build number (could be 0)\n self.get_last_sanity()\n\n # * List all build numbers for this version. 
Note this may include\n # builds for other versions, since all versions for a given\n # release share a build directory.\n # * Ignore builds above 50000, which are toy builds\n\n builds = [int(x) for x in os.listdir(self.ver_dir)\n if x.isdigit() and int(x) > self.last_bld and int(x) < 50000]\n builds.sort()\n\n # Check each build after last sanity-checked build\n bld_num = self.last_bld\n for build in builds:\n print (\"Checking build \" + str(build))\n if self.check_build(build):\n bld_num = build\n print(\"bld_num is now \" + str(bld_num))\n return bld_num", "def get_builds(self, *, params: Optional[dict] = None) -> \"resource_types.Builds\":\n\n return communicator.Builds(self.__requester).fetch(parameters=params)", "def GetBuilds(date=0):\n\n # If date is set, get the build id from waterfall.\n builds = []\n\n if date:\n for builder in WATERFALL_BUILDERS + ROTATING_BUILDERS:\n build_ids = GetBuildID(builder, date)\n for build_id in build_ids:\n builds.append((builder, build_id))\n return builds\n\n # If date is not set, we try to get the most recent builds.\n # Read the values of the last builds used to generate a report, and\n # increment them appropriately, to get values for generating the\n # current report. (See comments in UpdateBuilds).\n with open(BUILD_DATA_FILE, 'r') as fp:\n lines = fp.readlines()\n\n for l in lines:\n l = l.rstrip()\n words = l.split(',')\n builder = words[0]\n build = int(words[1])\n builds.append((builder, build + 1))\n # NOTE: We are assuming here that there are always 2 daily builds in\n # each of the rotating builders. I am not convinced this is a valid\n # assumption.\n if builder in ROTATING_BUILDERS:\n builds.append((builder, build + 2))\n\n return builds", "def test_returns_limit_projects(self):\n # Arrange\n # Create and arrange test projects\n self.arrange_projects()\n # Act\n response = self.client.get(\n f\"{self.url}?limit=1\", headers={\"Authorization\": self.user_session_token}\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 1)", "def get_build_number():\n try:\n return int(os.getenv(*legion.config.BUILD_NUMBER))\n except ValueError:\n raise Exception('Cannot parse build number as integer')", "def generateFinishedBuilds(branches=[],\n num_builds=None,\n max_buildnum=None, finished_before=None,\n max_search=200,\n ):", "def get_pull_requests_count(self):\n repo_details = self.repo_url.strip().split('/')[-2:]\n pull_requests = 0\n i = 1\n while True:\n args = {'state': 'open', 'page': i, 'per_page': 100}\n api_url = \"https://api.github.com/repos/{}/{}/pulls?{}\".format(repo_details[0], repo_details[1],\n urllib.parse.urlencode(args))\n response = requests.request(\"GET\", api_url)\n response = json.loads(response.content)\n if not response:\n return pull_requests\n else:\n pull_requests += len(response)\n i += 1", "def getPendingBuildTimes():\n # TODO: it might be nice to make this into getPendingBuildSets, which\n # would let someone subscribe to the buildset being finished.\n # However, the Scheduler doesn't actually create the buildset until\n # it gets submitted, so doing this would require some major rework.", "def getBuildRequests():", "def get_builder_stats(builder: str, time_window: datetime.datetime) -> BuildStats:\n print('Gettings builds for {}...'.format(builder))\n # TODO: can we limit the data we're requesting?\n url = '{}/{}/builds/_all'.format(BASE_URL, builder)\n stats = BuildStats()\n for build, results in requests.get(url).json().items(): \n start_time = 
datetime.datetime.fromtimestamp(float(results['times'][0]))\n if start_time < time_window:\n continue\n successful = results['text'] == ['build', 'successful']\n stats.add(successful)\n return stats", "def generateFinishedBuilds(builders=[], branches=[],\n num_builds=None, finished_before=None,\n max_search=200):", "def num_projects(self, num_projects):\n\n self._num_projects = num_projects", "def collection_get(self):\n\n return {\n 'builds': self.build_info.get_builds(\n self.request.matchdict['product_name'],\n version=self.request.matchdict['product_version']\n )\n }", "def build_number(self):\n return self.get_data(\"build_number\")", "def build():\n return get_cached(\"build.json\", False).get(\"build_id\")", "def bamboo_builds(ctx, from_date, to_date, use_cache):\r\n\r\n if from_date is None:\r\n from_date, to_date = previous_month_range()\r\n\r\n log.info('Getting Bamboo builds between {} and {}'.format(from_date, to_date))\r\n report = BambooBuildsReport(\r\n ctx.obj,\r\n from_date=from_date,\r\n to_date=to_date\r\n )\r\n report.run_report(use_cache=use_cache)", "def get_build(self, build_id):\n pass", "def concurrent_builds(self):\n return self._concurrent_builds", "def get_first_n_crawled_chunks(self, number):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT * FROM crawler WHERE c_task = 'crawled' ORDER BY index LIMIT %s;\", (number,))\n results = cur.fetchall()\n cur.close()\n return results\n except Exception as e:\n print(e)", "def get_project_count(db):\n\n count = 0\n for element in db:\n count += 1\n return count", "async def get_builds(self, args):\n builds = []\n with self.connection.engine.begin() as conn:\n query = self.query(args)\n for row in conn.execute(query):\n build = dict(row)\n # Convert date to iso format\n if row.start_time:\n build['start_time'] = row.start_time.strftime(\n '%Y-%m-%dT%H:%M:%S')\n if row.end_time:\n build['end_time'] = row.end_time.strftime(\n '%Y-%m-%dT%H:%M:%S')\n # Compute run duration\n if row.start_time and row.end_time:\n build['duration'] = (row.end_time -\n row.start_time).total_seconds()\n builds.append(build)\n return builds", "def get_top_import_repo(self,n=None, _filter=None):\n\t\tentries = self.get_all(_filter)\n\t\tric = RepoImportCounter(self.package)\n\t\ti = 0\n\t\tfor entry in entries:\n\t\t\tif (i%1000 == 0):\n\t\t\t\tprint(i)\n\t\t\tric.parse(entry)\n\t\t\ti += 1\n\t\treturn ric.get_most_common(n)", "def get_new_build(old_version, new_version, build):\n\n # Version did not change, increment the current build number\n if old_version == new_version:\n return str(int(build) + 1)\n\n # Version changed, start over at 1\n else:\n return str(1)", "def builds(self):\n builds = [b for b in self.statuses() if b[\"type\"] == \"build\"]\n for build in builds:\n yield Build(build, **self._new_session_args)\n\n return", "def 取项目数(self): # real signature unknown; restored from __doc__\n return self.GetCount()", "def get_projects(self):\n projects = []\n page = 1\n while not len(projects) % 100:\n projects += self._get('/projects?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not projects:\n break\n page += 1\n return projects", "def projects_count(args):\n session = GithubSession()\n\n print(f\"counting {args.name}\")\n\n board = session.get_project(args.name)\n\n tally = []\n\n columns = session.get_columns(board)\n for column in columns:\n print(column[\"name\"], file=sys.stderr)\n\n cards = 
list(session.get_cards(column))\n\n total = Decimal(0)\n unpointed = 0\n num_cards = 0\n num_walk_ins = 0\n issues = []\n walk_ins = []\n walk_in_points = 0\n\n for card_data in cards:\n issue_number = utils.get_issue_number_from_card_data(card_data)\n if not issue_number: # must be a note\n continue\n\n issue_data = session.get_issue(issue_number)\n labels = issue_data[\"labels\"]\n\n num_cards += 1\n\n points = get_points(labels)\n if points:\n total += points\n else:\n unpointed += 1\n\n issue_data = {\n \"issue_number\": issue_number,\n \"points\": str(points),\n \"unpointed\": points is None,\n \"walk_in\": False,\n }\n\n if is_walk_in(labels):\n num_walk_ins += 1\n if points:\n walk_in_points += points\n\n issue_data[\"walk_in\"] = True\n\n walk_ins.append(issue_data)\n\n issues.append(issue_data)\n\n tally.append(\n {\n \"column\": column[\"name\"],\n # 'issues': issues,\n \"num_cards\": num_cards,\n \"num_walk_ins\": num_walk_ins,\n \"walk_in_points\": str(walk_in_points),\n # 'walk_ins': walk_ins,\n \"total_points\": str(total),\n \"unpointed\": unpointed,\n }\n )\n\n print(json.dumps(tally, indent=4))", "def launch_project_sizing():\n from queries import IN_PRODUCTION_NEED_SCAN, NEW_NEED_SCAN, OTHER_NEED_SCAN\n if not getattr(settings,\"GNMPLUTOSTATS_PROJECT_SCAN_ENABLED\",False):\n logger.error(\"GNMPLUTOSTATS_PROJECT_SCAN_ENABLED is false, not going to trigger launching\")\n return \"GNMPLUTOSTATS_PROJECT_SCAN_ENABLED is false, not going to trigger launching\"\n\n prioritise_old = getattr(settings,\"GNMPLUTOSTATS_PRIORITISE_OLD\",False)\n if prioritise_old:\n logger.warning(\"GNMPLUTOSTATS_PRIORITISE_OLD is set, will only focus on old projects\")\n\n trigger_limit = int(getattr(settings,\"GNMPLUTOSTATS_PROJECT_SCAN_LIMIT\",10))\n to_trigger = []\n c=0\n\n logger.info(\"Gathering projects to measure\")\n\n if not prioritise_old:\n highest_priority = IN_PRODUCTION_NEED_SCAN.order_by('last_scan')\n for entry in highest_priority:\n to_trigger.append(entry)\n logger.info(\"{0}: {1} ({2})\".format(c, entry,entry.project_status))\n c+=1\n if c>=trigger_limit:\n break\n\n if not prioritise_old and len(to_trigger)<trigger_limit:\n next_priority = NEW_NEED_SCAN.order_by('last_scan')\n for entry in next_priority:\n to_trigger.append(entry)\n logger.info(\"{0}: {1} ({2})\".format(c, entry,entry.project_status))\n c+=1\n if c>=trigger_limit:\n break\n\n if len(to_trigger)<trigger_limit:\n everything_else = OTHER_NEED_SCAN.order_by('last_scan')\n for entry in everything_else:\n to_trigger.append(entry)\n logger.info(\"{0}: {1} ({2})\".format(c, entry,entry.project_status))\n c+=1\n if c>=trigger_limit:\n break\n\n logger.info(\"Projects to scan: \".format(to_trigger))\n if len(to_trigger)==0:\n if prioritise_old:\n logger.error(\"No projects to scan and GNMPLUTOSTATS_PRIORITISE_OLD is set. 
You should disable this now to pick up new projects\")\n logger.info(\"No projects need to be scanned right now\")\n\n n=0\n for entry in to_trigger:\n n+=1\n calculate_project_size.apply_async(kwargs={'project_id': entry.project_id},queue=getattr(settings,\"GNMPLUTOSTATS_PROJECT_SCAN_QUEUE\",\"celery\"))\n return \"Triggered {0} projects to scan\".format(n)", "def build_show_statistics(ctx, args):\n for build_id in args:\n data = ctx.obj.get_build_statistics_by_build_id(build_id)\n output_json_data(data)", "def getCurrentBuilds():\n # again, we could probably provide an object for 'waiting' and\n # 'interlocked' too, but things like the Change list might still be\n # subject to change", "def list_build(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_build\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/builds'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1BuildList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def number_of_launches(self):\n return self._number_of_launches", "def build_number(self):\n return self._build_number", "def GetBuildNumFromBuilder(build_reason, bot_name, builder_host, builder_port):\n # Gets the buildbot url for the given host and port.\n server_url = _GetBuildBotUrl(builder_host, builder_port)\n buildbot_url = BUILDER_JSON_URL % {'server_url': server_url,\n 'bot_name': bot_name,\n 'build_num': '_all'\n }\n builds_json = _FetchBuilderData(buildbot_url)\n if builds_json:\n builds_data = json.loads(builds_json)\n for current_build in builds_data:\n if builds_data[current_build].get('reason') == build_reason:\n return builds_data[current_build].get('number')\n return None", "def GetBuildID(build_bot, date):\n day = '{day:02d}'.format(day=date%100)\n mon = MONTHS[date/100%100]\n date_string = mon + ' ' + day\n if build_bot in WATERFALL_BUILDERS:\n url = 'https://uberchromegw.corp.google.com/i/chromeos/' + \\\n 'builders/%s?numbuilds=200' % build_bot\n if build_bot in ROTATING_BUILDERS:\n url = 
'https://uberchromegw.corp.google.com/i/chromiumos.tryserver/' + \\\n 'builders/%s?numbuilds=200' % build_bot\n command = 'sso_client %s' %url\n retval = 1\n retry_time = 3\n while retval and retry_time:\n retval, output, _ = \\\n command_executer.GetCommandExecuter().RunCommandWOutput(command, \\\n print_to_console=False)\n retry_time -= 1\n\n if retval:\n return []\n\n out = output.split('\\n')\n line_num = 0\n build_id = []\n # Parse the output like this\n # <td>Dec 14 10:55</td>\n # <td class=\"revision\">??</td>\n # <td failure</td><td><a href=\"../builders/gcc_toolchain/builds/109\">#109</a>\n while line_num < len(out):\n if date_string in out[line_num]:\n if line_num + 2 < len(out):\n build_num_line = out[line_num + 2]\n raw_num = re.findall(r'builds/\\d+', build_num_line)\n # raw_num is ['builds/109'] in the example.\n if raw_num:\n build_id.append(int(raw_num[0].split('/')[1]))\n line_num += 1\n return build_id", "def get_job_builds(self, job_id, started=None, finished=None,\n success=None, skipped=None, order='asc', limit=100):\n pass", "def GetBuildInfo(builder_name, build_num):\n url = '%s/json/builders/%s/builds/%s' % (BUILD_MASTER_URL,\n builder_name,\n build_num)\n return json.load(urllib2.urlopen(url))", "def get_number_of_submissions():\n\n start = time.time()\n print(\"counting submissions in\", TEST_SUBREDDIT, 'between', TEST_START_DATE, 'and', TEST_END_DATE)\n threads = list(get_submissions(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))\n end = time.time()\n print('time elapsed: ', end - start)\n print('total submissions:', len(threads))\n print(TEST_MAX)", "def get_last_successful_build_nr(jenkins_url, job_name):\n return execute_command(\n f\"wget -qO- {jenkins_url}/{job_name}/lastSuccessfulBuild/buildNumber\"\n )", "def get_total_number_of_buildings_for_user(request):\n buildings_count = get_buildings_for_user_count(request.user)\n\n return {'status': 'success', 'buildings_count': buildings_count}", "def get_latest_results_for_project(self, project):\n session = self.session_factory()\n subq = session.query(\n PipelineRun.repository, func.max(cast(PipelineRun.run_id, Integer)).label('max_run_id')).group_by(\n PipelineRun.repository).filter_by(project=project).subquery('subq')\n results = session.query(PipelineRun).filter(\n PipelineRun.repository == subq.c.repository,\n PipelineRun.run_id == cast(subq.c.max_run_id, String)).all()\n session.close()\n return results", "def GetStructuredBuilds(self, latest_build_id=None,\n num_builds=DEFAULT_NUM_BUILDS, extra_filter_q=None):\n # If we're not given any latest_build_id, we fetch the latest builds\n if latest_build_id is not None:\n build_qs = ba_models.BuildTable.objects.filter(id__lte=latest_build_id)\n else:\n build_qs = ba_models.BuildTable.objects.all()\n\n if extra_filter_q is not None:\n build_qs = build_qs.filter(extra_filter_q)\n build_qs = build_qs.order_by('-id')\n build_qs = build_qs[:num_builds]\n\n # Critical for performance: Prefetch all the join relations we'll need.\n build_qs = build_qs.prefetch_related('buildstagetable_set')\n build_qs = build_qs.prefetch_related('clactiontable_set')\n build_qs = build_qs.prefetch_related(\n 'buildstagetable_set__failuretable_set')\n build_qs = build_qs.prefetch_related('annotationstable_set')\n\n # Now hit the database.\n build_entries = [x for x in build_qs]\n\n self._build_rows_map = {}\n build_rows = []\n for build_entry in build_entries:\n build_stage_entries = [x for x in build_entry.buildstagetable_set.all()]\n cl_action_entries = [x for x in 
build_entry.clactiontable_set.all()]\n failure_entries = []\n for entry in build_stage_entries:\n failure_entries += [x for x in entry.failuretable_set.all()]\n # Filter in python, filter'ing the queryset changes the queryset, and we\n # end up hitting the database again.\n annotations = [a for a in build_entry.annotationstable_set.all() if\n a.deleted == False]\n costly_annotations_qs = build_entry.annotationstable_set.filter(\n deleted=False)\n\n build_row = BuildRow(build_entry, build_stage_entries, cl_action_entries,\n failure_entries, annotations, costly_annotations_qs)\n\n self._build_rows_map[build_entry.id] = build_row\n build_rows.append(build_row)\n\n if build_entries:\n self._latest_build_id = build_entries[0].id\n\n return build_rows", "def get_buildbot_stats(time_window : datetime.datetime) -> BuildStats:\n print('getting list of builders...')\n stats = BuildStats()\n for builder in requests.get(BASE_URL).json().keys():\n # TODO: maybe filter the builds to the ones we care about\n stats += get_builder_stats(builder, time_window )\n return stats", "def trigger_job(revision, buildername, times=1, files=None, dry_run=False,\n extra_properties=None):\n repo_name = query_repo_name_from_buildername(buildername)\n builder_to_trigger = None\n list_of_requests = []\n LOG.info(\"We want to trigger '%s' on revision '%s' a total of %d time(s).\" %\n (buildername, revision, times))\n\n if not buildapi.valid_revision(repo_name, revision):\n return list_of_requests\n\n if not valid_builder(buildername):\n LOG.error(\"The builder %s requested is invalid\" % buildername)\n # XXX How should we exit cleanly?\n exit(-1)\n\n if files:\n builder_to_trigger = buildername\n _all_urls_reachable(files)\n else:\n builder_to_trigger, files = _determine_trigger_objective(\n revision,\n buildername,\n )\n\n if builder_to_trigger != buildername and times != 1:\n # The user wants to trigger a downstream job,\n # however, we need a build job instead.\n # We should trigger the downstream job multiple times, however,\n # we only trigger the upstream jobs once.\n LOG.debug(\"Since we need to trigger a build job we don't need to \"\n \"trigger it %s times but only once.\" % times)\n LOG.info(\"In order to trigger %s %i times, please run the script again after %s ends.\"\n % (buildername, times, builder_to_trigger))\n times = 1\n\n if builder_to_trigger:\n if dry_run:\n LOG.info(\"Dry-run: We were going to request '%s' %s times.\" %\n (builder_to_trigger, times))\n # Running with dry_run being True will only output information\n trigger(builder_to_trigger, revision, files, dry_run, extra_properties)\n else:\n for _ in range(times):\n req = trigger(builder_to_trigger, revision, files, dry_run, extra_properties)\n if req is not None:\n list_of_requests.append(req)\n else:\n LOG.debug(\"Nothing needs to be triggered\")\n\n return list_of_requests", "def bframes_count(**kwargs) -> int:\n path_project = kwargs['project_name']\n project_name = path_project.split( '/' )[-1].strip( '.' 
)\n if project_name in frames_count:\n return frames_count[project_name]['count']\n else:\n bpy.ops.wm.open_mainfile( filepath=path_project )\n count_frames = bpy.context.scene.frame_end\n frames_count[project_name] = {'project_name': project_name, 'count': count_frames}\n return count_frames", "def get_build(self):\n return self.bot_data_file[\"build\"]", "def build_list(ctx, show_url, show_data,\n start, count,\n project, build_type_id, branch, status, running, tags, user,\n output_format, columns):\n kwargs = {'start': start,\n 'count': count}\n if build_type_id:\n kwargs['build_type_id'] = build_type_id\n if branch:\n kwargs['branch'] = branch\n if status:\n kwargs['status'] = status\n if running:\n kwargs['running'] = running\n if tags:\n kwargs['tags'] = tags\n if user:\n kwargs['user'] = user\n if project:\n kwargs['project'] = project\n\n func = ctx.obj.get_builds\n\n if show_url:\n kwargs['return_type'] = 'url'\n url = func(**kwargs)\n del kwargs['return_type']\n click.echo(url)\n\n if not show_data:\n return\n\n try:\n data = func(**kwargs)\n except HTTPError as e:\n click.echo('url: %s' % e.url)\n click.echo('status_code: %s' % e.status_code)\n click.echo()\n click.echo(e)\n return\n\n for build in data['build']:\n details = ctx.obj.get_build_by_build_id(build['id'])\n try:\n build['user'] = details['triggered']['user']['username']\n build['statusText'] = details['statusText']\n build['details'] = details\n except KeyError:\n build['user'] = 'N/A'\n\n click.echo('count: %d' % data['count'])\n if data['count'] == 0:\n return\n\n if output_format == 'table':\n column_names = columns.split(',')\n output_table(column_names, data['build'])\n elif output_format == 'json':\n output_json_data(data)", "def do_projects(self, arg):\n args = shlex.split(arg)\n limit = 10\n from_date = to_date = ''\n if args:\n limit = 0\n try:\n from_date, to_date = helpers.parse_date_parameters(args)\n except ValueError, msg:\n print(msg)\n return\n projects = self.db.get_projects_with_activity_field(\n from_date, to_date, limit=limit)\n refined = map(lambda x: [\n x['pid'], x['name'],\n '[Active]' if x['active'] else '[closed]',\n datetime.datetime.strftime(x['created'], '%c').decode('utf8'),\n x['description']], projects)\n print(tabulate(refined, ['ID', 'Project', 'Activity', 'Created',\n 'Description']))", "def num_models():\n N = input(\"How many models would you like to test?\")\n N = int(N)\n return N", "def query_jobs(repo_name, revision):\n return buildapi.query_jobs_schedule(repo_name, revision)", "def build_browse(ctx, args):\n for build_id in args:\n data = ctx.obj.get_build_by_build_id(build_id)\n webbrowser.open(data['webUrl'])", "def get_api(self, project_id, api_id, build_num=None, asset_id=None): #pylint: disable=too-many-locals\n\n api = self.get_nondeleted_api(bytes(project_id, self._charset), bytes(api_id, self._charset))\n\n builds = []\n builds_counter = int(api[b\"builds:counter\"].decode(self._charset))\n\n for idx in range(1, builds_counter + 1):\n build_attrs = DynamicObject(json.loads(self._get_build_cellvalue(api, idx, \"attrs\")))\n\n secrets_data = json.loads(self._get_build_cellvalue(api, idx, \"secrets\"))\n for secret in secrets_data:\n secret.pop(\"value\", None)\n\n assets = []\n for key in [k for k in api.keys() if k.startswith(self._get_build_cellname(idx, \"assets-\"))]:\n key = key.decode(self._charset)\n curr_asset_id = key.replace(\"builds:{0}-assets-\".format(idx), \"\")\n asset_data = json.loads(self._get_build_cellvalue(api, idx,\n 
\"assets-{0}\".format(curr_asset_id)))\n asset_data = DynamicObject(asset_data)\n\n assets.append({\n \"id\": curr_asset_id,\n \"downloadPath\": asset_data.download_path,\n \"mountPath\": asset_data.mount_path,\n \"fileSize\": asset_data.file_size,\n \"md5sum\": asset_data.md5sum\n })\n\n droplet_data = self._get_build_cellvalue(api, idx, \"droplet\")\n droplet = None\n\n if droplet_data:\n droplet_data = DynamicObject(json.loads(droplet_data))\n droplet = {\n \"id\": droplet_data.id,\n \"downloadPath\": droplet_data.download_path,\n \"md5sum\": droplet_data.md5sum,\n \"fileSize\": droplet_data.file_size\n }\n\n builds.append({\n \"id\": build_attrs.id,\n \"build\": idx,\n \"description\": build_attrs.description,\n \"configuration\": {\n \"secrets\": secrets_data,\n \"environment\": json.loads(self._get_build_cellvalue(api, idx, \"env\")),\n \"assets\": assets,\n \"droplet\": droplet\n }\n })\n\n if build_num:\n curr_build = builds[build_num - 1]\n if not asset_id:\n return curr_build\n\n curr_build = DynamicObject(curr_build)\n asset = [asset for asset in curr_build.configuration.assets if asset[\"id\"] == asset_id]\n return asset[0]\n\n builds.sort(key=lambda item: item[\"build\"], reverse=True)\n\n\n return {\n \"id\": api_id,\n \"path\": api[b\"attrs:path\"].decode(self._charset),\n \"project\": {\n \"id\": project_id,\n \"name\": api[b\"project:name\"].decode(self._charset)\n },\n \"exposedPorts\": json.loads(api[b\"attrs:ports\"].decode(self._charset)),\n \"builds\": builds\n }", "def get(self, *args):\n params = self.parse_query_string(args[0])\n\n module = self.get_module(params)\n impl = module.ProductsBuilds(config=self.context)\n\n return impl.get(**params)", "def get_count(cls, project_id, session=None):\n return cls.db_repo.get_count(project_id, session=session)", "def get_buildings_for_user_count(user):\n return BuildingSnapshot.objects.filter(\n super_organization__in=user.orgs.all(),\n canonicalbuilding__active=True,\n ).count()", "def get_backlog(self, project, limit=None):\n ret_val = []\n resource = \"projects/{0:d}/iterations\".format(project.id)\n params = {\"fields\": Iteration.FIELDS, \"scope\": \"current_backlog\"}\n\n if limit:\n params[\"limit\"] = limit\n\n iterations = self._request(\"get\", resource, params=params)\n\n if not iterations:\n params[\"scope\"] = \"backlog\"\n iterations = self._request(\"get\", resource, params=params)\n\n for iteration in iterations:\n iteration = Iteration(iteration)\n ret_val.extend(iteration.stories)\n\n return ret_val", "def get_n_needed_pages(n_results):\n if n_results % N_RESULTS_FOR_PAGE == 0:\n return int(n_results / N_RESULTS_FOR_PAGE)\n else:\n return int(n_results / N_RESULTS_FOR_PAGE) + 1", "def fetch_buildings(self, planet=None):\n return self.fetch_levels(\"station\", planet, codes.buildings)", "def getRevisionLimit(self):\n return self.client.get(self.name +\"/_revs_limit\").getBodyData()", "def FindRecentBuilds(ab_client, branch, target,\n build_type='submitted',\n build_attempt_status=None,\n build_successful=None):\n kwargs = {\n 'branch': branch,\n 'target': target,\n }\n if build_type is not None:\n kwargs['buildType'] = build_type\n if build_attempt_status is not None:\n kwargs['buildAttemptStatus'] = build_attempt_status\n if build_successful is not None:\n kwargs['successful'] = build_successful\n builds = ab_client.build().list(**kwargs).execute().get('builds')\n\n # Extract the build_ids, convert to int, arrange newest to oldest.\n return sorted((int(build['buildId']) for build in builds), 
reverse=True)", "def get_set_build_percent_args(self,comArgs):\n params, flags = self.get_params(comArgs)\n args = [params.get('P', 50)]\n return args", "def do_top(cs, args):\n resp, data = cs.repositories.get_top(args.count)\n utils.print_list(data, ['name', 'count'], sortby='count')", "def test_list_artifacts_for_build(fake_client):\n artifacts = Artifacts(fake_client, \"base\")\n artifacts.list_artifacts_for_build(\"org_slug\", \"pipe_slug\", \"build_no\")\n url = \"base/organizations/org_slug/pipelines/pipe_slug/builds/build_no/artifacts/\"\n fake_client.get.assert_called_with(\n url, query_params={\"page\": 0}, with_pagination=False\n )", "def get_recent_release_with_count(self):\n\t\tconnection = self.connect_to_db()\n\t\tcursor = connection.cursor()\n\t\tcursor.execute('''select count(*) from movies where release_year = 2016;''')\n\t\tpage_count = cursor.fetchone()[0]\n\t\tconnection.close()\n\t\tpage_count = int(ceil(page_count))\n\t\treturn page_count", "def run(organization, top_n, username, pat):\n print()\n try:\n raw_repos = get_repos(organization, username=username, pat=pat)\n except Exception as ex:\n click.echo('Error collecting repos')\n sys.exit(1)\n\n repos = []\n\n with Halo(text='Retrieving repos...', spinner='dots'):\n for raw_repo in raw_repos:\n repos.append(Repo(raw_repo))\n\n if len(repos) == 0:\n print('No public repos were found')\n sys.exit(0)\n\n with Halo(text='Retrieving pull requests...', spinner='dots'):\n try:\n with ThreadPoolExecutor(max_workers=5) as executor:\n future_to_repo = {executor.submit(get_prs, repo.pr_url, username, pat): repo for repo in repos}\n for future in as_completed(future_to_repo):\n repo = future_to_repo[future]\n\n repo.pr_count = future.result()\n except Exception as exc:\n print('%r generated an exception: %s' % (repo.name, exc))\n sys.exit(1)\n\n top_star = sorted(repos, key=lambda repo: repo.stars, reverse=True)[:top_n]\n top_fork = sorted(repos, key=lambda repo: repo.forks, reverse=True)[:top_n]\n top_prs = sorted(repos, key=lambda repo: repo.pr_count, reverse=True)[:top_n]\n top_contrib = sorted(repos, key=lambda repo: repo.contrib, reverse=True)[:top_n]\n\n print_stars(top_star, top_n)\n print_forks(top_fork, top_n)\n print_prs(top_prs, top_n)\n print_contrib(top_contrib, top_n)", "def get_number_buildings (community, data_dir, diagnostics):\n datafile = os.path.join(data_dir, 'non-res_count.csv')\n try:\n data = int(\n read_csv(\n datafile ,\n comment = \"#\",\n index_col = 0,\n header = 0\n ).ix[community]['Buildings'].sum()\n )\n except (KeyError, ValueError):\n data = 0\n diagnostics.add_note(\"Non-residential Efficiency: Community \",\n (\"\" + community[0] + \" does not have an entry in \"\n \"\" + os.path.split(datafile)[1] + \", using 0\"\n ))\n\n return data", "def _get_n_jobs(self):\n self._validate_n_jobs()\n return deepcopy(self.n_jobs)", "def getPendingBuildsets():\n # TODO: this is not implemented anywhere", "def api_get_threads(request, count):\n\n if PARAMETER_TAG in request.GET:\n tag_name = request.GET[PARAMETER_TAG]\n if tag_name is not None:\n tag = get_object_or_404(Tag, name=tag_name)\n threads = tag.threads.filter(archived=False)\n else: \n threads = Thread.objects.filter(archived=False)\n\n if PARAMETER_OFFSET in request.GET:\n offset = request.GET[PARAMETER_OFFSET]\n offset = int(offset) if offset is not None else 0\n else:\n offset = 0\n\n threads = threads.order_by('-bump_time')\n threads = threads[offset:offset + int(count)]\n\n opening_posts = []\n for thread in threads:\n opening_post = 
thread.get_opening_post()\n\n # TODO Add tags, replies and images count\n opening_posts.append(_get_post_data(opening_post.id,\n include_last_update=True))\n\n return HttpResponse(content=json.dumps(opening_posts))", "def get_number_of_models():\n return 8", "def get_last_completed_build_number(jenkins_url, job_name):\n return execute_command(\n f\"wget -qO- {jenkins_url}/{job_name}/lastCompletedBuild/buildNumber\"\n )", "def max_count(self):\n return self.config.get('max_count', 500)", "def get_revision_count(self):\n response = self._get_request(\n DeckhandClient.get_path(DeckhandPaths.REVISION_LIST)\n )\n self._handle_bad_response(response)\n revisions = yaml.safe_load(response.text)\n return revisions['count']", "def projects_top_json():\n limit = request.args.get('limit') or 10\n #sort_by_score = request.args.get('score') or False\n #sort_by_update = request.args.get('update') or False\n pp = Project.query \\\n .filter_by(is_hidden=False) \\\n .order_by(Project.progress.desc()) \\\n .limit(limit).all()\n projects = expand_project_urls(\n [p.data for p in pp],\n request.host_url\n )\n return jsonify(projects=projects)", "def get_projects(self, *criterion):\n from wkcdd.models.helpers import get_project_list\n return get_project_list([self.id], *criterion)", "def number_of_launches(self, number_of_launches):\n\n self._number_of_launches = number_of_launches", "def report(base_url):\n info = get(base_url)\n last_url = \"{0}/{1}\".format(base_url, info['lastCompletedBuild']['number'])\n last = get(last_url)\n return last", "def get_number_of_development(self):\n return self.n_develop", "def build_api_version(self):\n return self._build_api_version", "def get_pullReq_commits(pullreq_url, user, passwd):\n \n #auth for 5000 request/h limitprint(\"\\nINPUT GITHUB AUTH TO GET BETTER REQUEST LIMIT\")\n if user=='' or passwd=='':\n user = input('username : ')\n passwd = input('passwd : ')\n\n #fetch 250 max commits\n pullReq_commits = get_requests(pullreq_url, user, passwd)\n\n return pullReq_commits", "def get_numberOfProjections(self):\n self._nproj = len(self._projIndices)\n return self._nproj", "def get(mbid, level='low', n=0):\n\n # Todo: /count/ ?\n\n # Set up URL\n url = API_URL.format(mbid=mbid, level=level+'-level')\n\n return requests.get(url, params=dict(n=n))", "def getNumBuildings(self, iPlayer, iBuilding):\n\t\tiCount = 0\n\t\tapCityList = PyPlayer(iPlayer).getCityList()\n\t\tfor pCity in apCityList:\n\t\t\tif pCity.getNumBuilding(iBuilding): iCount += 1\n\t\treturn iCount", "def getPreviousBuild():", "def get_ciceroscm_configurations(\n ciceroscm_version,\n ciceroscm_probabilistic_file,\n num_cfgs,\n):\n if CICEROSCM.get_version() != ciceroscm_version:\n # version strings for linux and windows might be different!\n raise AssertionError(CICEROSCM.get_version())\n\n with open(ciceroscm_probabilistic_file, \"r\") as fh:\n cfgs_raw = json.load(fh)\n ciceroscm_cfgs = [c for c in cfgs_raw[:num_cfgs][:]]\n\n LOGGER.debug(\"%d total cfgs\", len(ciceroscm_cfgs))\n\n return ciceroscm_cfgs", "def getNFiles(self, config, base, logger=None):\n if 'nfiles' in config:\n return galsim.config.ParseValue(config, 'nfiles', base, int)[0]\n else:\n return 189", "def get_total_num_clients(task):\n if task == 'stackoverflow_lr':\n return 342477\n else:\n raise ValueError(f'Unsupported task: {task}')" ]
[ "0.728129", "0.728129", "0.6382856", "0.62236434", "0.6215881", "0.6080445", "0.6080445", "0.59587866", "0.59204555", "0.57647854", "0.5759255", "0.5724999", "0.5719008", "0.56793606", "0.56029767", "0.5565936", "0.55610716", "0.5540158", "0.54715705", "0.54323786", "0.5412853", "0.5399856", "0.53983504", "0.53974426", "0.5380614", "0.53742117", "0.53711414", "0.5364596", "0.5361517", "0.53351253", "0.5264552", "0.5263952", "0.52527374", "0.52160746", "0.51966846", "0.5182989", "0.51825273", "0.51746213", "0.51603466", "0.51371104", "0.51290643", "0.5110578", "0.50881463", "0.50489026", "0.50401646", "0.50331855", "0.5015975", "0.49938795", "0.4989612", "0.49861246", "0.4980737", "0.498045", "0.49784023", "0.49696118", "0.49560675", "0.4941146", "0.49392483", "0.49281684", "0.49255624", "0.4920918", "0.49186233", "0.48674187", "0.48646942", "0.48595464", "0.48535636", "0.48471838", "0.48446554", "0.48424488", "0.484244", "0.48412263", "0.4840594", "0.4810841", "0.47992", "0.4798782", "0.47982594", "0.47839436", "0.4774945", "0.47728083", "0.47710162", "0.4767198", "0.4766906", "0.4761303", "0.47518897", "0.47518876", "0.4737944", "0.47305593", "0.4715322", "0.47150117", "0.4714475", "0.47124887", "0.47106418", "0.47035488", "0.47008532", "0.46976742", "0.46880558", "0.46875033", "0.46801162", "0.46762872", "0.46675858", "0.46621573" ]
0.73818636
0
Makes a standard GET request.
async def _get_request(self, url): # Request the specific URL async with self.session.get(url, headers=self.headers) as resp: # Finally return the response return await resp.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_GET(self):\n self.http_method = 'GET'\n self.response()", "def get(self, *args, **kwargs):\n self.request(\"get\", *args, **kwargs)", "def get(self, *path, **data):\n\t\treturn self.request('GET', *path, **data)", "def get(self, *args, **kw):\n kw['method'] = 'GET'\n return self.open(*args, **kw)", "def _get(self, *args, **kwargs):\n return self._request('get', *args, **kwargs)", "def get(self, *args, **kwargs):\n return self._request('get', *args, **kwargs)", "def http_method_get():\n return 'GET'", "def do_GET(self):\r\n self._send_handler_response('GET')", "def do_GET(self):\n self.log.debug('do_GET called')\n self.HeadGet('GET')", "def get(self, *args, **kwargs):\n return self._hit(\"GET\", *args, **kwargs)", "def get(self):\n self.get_or_post(method='GET')", "def get(self, *args, **kwargs):\n url = urljoin(self.instance(), args[0])\n return self._requests_call(util.requests_get, url, *args[1:], **kwargs)", "def _get(self, path=\"\", query={}, **kwargs):\n qs = urllib.urlencode(query)\n uri = force_json(self.uri + path) + \"?\" + qs\n return self.client.request(uri, method=\"GET\", **kwargs)", "def do_GET(self):\n self._try_to_process_request(self._handle_get_request)", "def get(self, url, query=None):\n # Perform get request with query filter\n if query is not None:\n return self._query(url, 'GET', params=quote(f'query=\"{query}\"'))\n\n # Perform simple get request\n return self._query(url, 'GET')", "def get(self, url):\n return self._request('GET', url)", "def __get(self, url, headers=None):\n return self.__req(url, \"GET\", headers=headers)", "def get(url, to_error=_default_to_error, **kwargs):\n\n return request('get', url, to_error=to_error, **kwargs)", "async def get(self, path, params=None, json_data=None):\n response = await self.request('GET', path, params, json_data)\n return response", "def test_get(self):\n return self.doRequest(self.url, method=\"GET\", body=self.input)", "def httpGet(self, url, parameters=None):\r\n return self.auth.get(url, parameters)", "def request_get(self, path, params=None):\n\tif params is None:\n\t\tparams = {}\n\t\trequest_url = self.host_url + path\n\t\ttry:\n\t\t\tresponse = self.session.get(request_url, auth=self.api_key, params=params)\n\t\texcept requests.RequestException as e:\n\t\t\traise self.DataUnavailable(\"Network exception\") from e\n\n\tif response.status_code != 200:\n\t\traise self.DataUnavailable(\n\t\t\t\"Unexpected response status (%s)\" % response.status_code\n\t\t)\n\n\treturn response.json()", "def simulate_get(self, path='/', **kwargs):\n return self.simulate_request('GET', path, **kwargs)", "def get(self, path):\n return self.request(path, method='GET')", "def get(self, url, *args):\n\n req_method = type(self.client).__name__\n\n if not url.startswith(\"http\"):\n\n if not url.startswith(\"/\"):\n url = \"/%s\" % url\n\n url = \"%s%s\" % (self.base, url)\n\n if req_method == \"FlaskClient\":\n self.client.get(url, headers=self.headers, *args)\n\n else:\n self.client.get(url, headers=self.headers, *args)", "def send_get(self, uri, data=None, ojson=None):\n return self.__send_request('GET', uri=uri, data=data, ojson=ojson)", "def simulate_get(app, path, **kwargs) -> _ResultBase:\n\n return simulate_request(app, 'GET', path, **kwargs)", "def _get(self, url):\n return self._request(url)", "def simulate_get(self, path='/', **kwargs) -> _ResultBase:\n return self.simulate_request('GET', path, **kwargs)", "def get(self):\r\n return http.Request('GET', self.get_url()), parsers.parse_json", "def get(self):\r\n return 
http.Request('GET', self.get_url()), parsers.parse_json", "def aget(url, **kwargs):\n return requests.get(url, **kwargs)", "def get(self, method, uri, query_param, request_param, headers, **kwargs):\n raise NotImplementedError", "def get(self, uri, params=None, headers=None, auth=False):\n return self.send_request('GET', uri, params, headers, auth)", "def _get(self, url, **queryparams):\n url = urljoin(self.base_url, url)\n if len(queryparams):\n url += '?' + urlencode(queryparams)\n try:\n r = self._make_request(**dict(\n method='GET',\n url=url,\n auth=self.auth,\n timeout=self.timeout,\n hooks=self.request_hooks,\n headers=self.request_headers\n ))\n except requests.exceptions.RequestException as e:\n raise e\n else:\n if r.status_code >= 400:\n _raise_response_error(r)\n return r.json()", "def read(self) -> requests.request:\n # Check if id is set,\n if self.args.id is not None:\n self.REQUEST_URL += str(self.args.id)\n\n # Send GET request\n return requests.get(self.REQUEST_URL)", "async def simulate_get(self, path='/', **kwargs) -> _ResultBase:\n return await self.simulate_request('GET', path, **kwargs)", "def do_GET(self):\r\n if not self._client_allowed():\r\n return\r\n\r\n try:\r\n (_, _, path, query, _) = urlparse.urlsplit(self.path)\r\n params = urlparse.parse_qs(query)\r\n # Give each handler a chance to respond.\r\n for prefix, handler in self._GET_handlers:\r\n if self._maybe_handle(prefix, handler, path, params):\r\n return\r\n # If no path specified, default to showing the list of all runs.\r\n if path == '/':\r\n self._handle_runs('', {})\r\n return\r\n\r\n self._send_content('Invalid GET request %s' % self.path, 'text/html')\r\n except (IOError, ValueError):\r\n pass # Printing these errors gets annoying, and there's nothing to do about them anyway.\r\n #sys.stderr.write('Invalid GET request %s' % self.path)\r", "def make_get_request(client, endpoint):\n return client.get(endpoint)", "def get(self, *args, **kwargs):\n return self.handle_get_request()", "def _get(self, request_obj):\n return self._execute_action(request_obj, 'GET')", "def get(self, *args, **kwargs):\n if len(args) != 1:\n raise TypeError('wrong number of arguments')\n return self._geturl.get(*args, **kwargs)", "def get(self, uri, extras=None):\r\n params = base.get_params(('uri', 'extras'), locals())\r\n return http.Request('GET', self.get_url(), params), parsers.parse_json", "def get(self, path, req = None, **kwargs):\n req = req or []\n return self.route(path, req=req+[filter_method(['GET'])], **kwargs)", "def api_get(self, path, query=None):\n return self._api_request(path, 'GET', query=query)", "def do_get_request(self, uri, headers, timeout_ms):\n return self._do_request('GET', uri, headers, None, timeout_ms, None)", "def do_GET(self):\n parsed_path = urlparse.urlparse(self.path)\n if parsed_path.path == '/books':\n return self.books()\n elif parsed_path.path == '/my_loans':\n return self.my_loans()\n return self.send_response(404)", "def http_get(self) -> Optional[pulumi.Input['HTTPGetActionArgs']]:\n return pulumi.get(self, \"http_get\")", "def do_GET(self):\n print(\"do_GET: got: \" + str(self.path))\n path_split = self.path.split(\"/\", 2) # ['', 'hello', 'nikolay/uuuuer']\n username = path_split[-1] # nikolay/uuuuer\n print(\"do_GET: Let's open db_conn\")\n connection = db_open()\n birth_date = db_select(username, connection)\n db_close(connection)\n\n days_until_bday = get_days_to_bday(birth_date)\n json_obj = http_construct_json(username, days_until_bday)\n\n self.http_send_reply(json_obj)\n 
print(\"do_GET: DONE! \\n\")", "def sr_get(self, route_or_uri, params=None, query=None, **kwargs):\n return self.__req(\n route_or_uri,\n params=params,\n query=query,\n op=self.get,\n raw_response=True,\n **kwargs,\n )", "def req_get(url, headers=None, params=None) -> Response:\n if params:\n url = \"{}?{}\".format(url, parse.urlencode(params))\n\n req = Request(url, headers=headers, method=\"GET\")\n\n with request.urlopen(req) as res:\n response = Response(res)\n return response", "def send_get(self, api_url, query=None):\n resp = requests.get(self.base_url + api_url, params=query)\n\n return resp", "def httpGet(self, url='', data='', params={}, headers={}):\n\n reply = self.httpRequest('GET', url, data, params, headers)\n return _WebObject(reply.json())", "def http_request(self, path=\"/\", method=\"GET\", host=None, port=None, json=False, data=None):\n\n host = host or '127.0.0.1'\n port = port or 8080\n url = get_url(host=host, port=port, path=path)\n\n return self.http_session.request(method, url, json=json, data=data)", "def send_simple_get_request(hostname, node, key, causal_payload=''):\n get_str = \"http://\" + hostname + \":\" + node.access_port + \"/kvs/\" + key\n data = {'causal_payload':causal_payload}\n if PRINT_HTTP_REQUESTS:\n print \"Get request: \" + get_str + ' data field:' + str(data)\n r = req.get(get_str, data=data)\n if PRINT_HTTP_RESPONSES:\n print r.text, r.status_code\n return r", "def _get(self):\n return self.request(method=\"get\", path=self.router.fields)", "def get(self, path='', **kwargs):\n\n r = self.session.get(self.url(path), **kwargs)\n self.log_request(r)\n return r", "def get_request(\n self,\n alias,\n uri,\n headers=None,\n data=None,\n json=None,\n params=None,\n allow_redirects=None,\n timeout=None):\n session = self._cache.switch(alias)\n # XXX workaround to restore library default behaviour. 
Not needed in new keywords\n redir = True if allow_redirects is None else allow_redirects\n\n response = self._common_request(\n \"get\",\n session,\n uri,\n params=params,\n headers=headers,\n data=data,\n json=json,\n allow_redirects=redir,\n timeout=timeout)\n\n return response", "def handle_get(self, api, command):\n return self._make_request_from_command('GET', command)", "def get(self, path: str, params: dict) -> dict:\n return self.request(\"GET\", path, params)", "def get(url, params=None, **kwargs):\n\n kwargs.setdefault('allow_redirects', True)\n return request('get', url, params=params, **kwargs)", "def _get(self, url, **kwargs):\n return self._http.get(self.cluster + url, timeout=self.timeout, **kwargs)", "def get(self, **kwargs):\r\n params = base.get_params(None, kwargs, serialize_param=serialize_param)\r\n request = http.Request('GET', self.get_url(), params)\r\n\r\n return request, parsers.parse_json", "def get(self, **kwargs):\r\n params = base.get_params(None, kwargs, serialize_param=serialize_param)\r\n request = http.Request('GET', self.get_url(), params)\r\n\r\n return request, parsers.parse_json", "def get(self, **kwargs):\r\n params = base.get_params(None, kwargs, serialize_param=serialize_param)\r\n request = http.Request('GET', self.get_url(), params)\r\n\r\n return request, parsers.parse_json", "def get(self, **kwargs):\r\n params = base.get_params(None, kwargs, serialize_param=serialize_param)\r\n request = http.Request('GET', self.get_url(), params)\r\n\r\n return request, parsers.parse_json", "def get(self, **kwargs):\r\n params = base.get_params(None, kwargs, serialize_param=serialize_param)\r\n request = http.Request('GET', self.get_url(), params)\r\n\r\n return request, parsers.parse_json", "def get(self, **kwargs):\r\n params = base.get_params(None, kwargs, serialize_param=serialize_param)\r\n request = http.Request('GET', self.get_url(), params)\r\n\r\n return request, parsers.parse_json", "def Get(Path: str, Params: Any = None, *, Callback: Callable[[int, Dict[str, Any]], None]) -> None:\n Request(\"GET\", Path, Params, None, Callback=Callback)", "def get(self, url: str, **kwargs: Dict[str, Any]) -> Any:\n pass", "def test_get(self):\n url, port = self.server.address\n\n #couple of basic GETs\n r = self.client.get(\"http://{0}:{1}/\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/200\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/400\".format(url, port))\n self.assertEqual(400, r.status_code)\n\n # GETs with params\n r = self.client.get(\"http://{0}:{1}/get_with_params\".format(url, port),\n params=self.params)\n self.assertEqual(200, r.status_code)\n self.assertEqual(str(self.params), r.text)\n\n # GETs with ...?", "def do_GET(self):\n #if self.path.startswith('/api/'):\n # f = self.send_response_headers('api call')\n #else:\n f=self.route()\n if f==False:\n f = self.send_head()\n if f:\n try:\n self.copyfile(f, self.wfile)\n finally:\n f.close()", "def request_get(path, params, stream=False):\r\n process = __split_path(path)\r\n\r\n if process['status'] == 'success':\r\n conn = process['conn']\r\n info = MetaInfo()\r\n header = {'vendor_key': info.vendor_id}\r\n # check 2.x and 3.x differences in using urllib\r\n try:\r\n conn.request(\"GET\", process['req_path'] + \"?\" +\r\n urllib.urlencode(params), headers=header)\r\n except AttributeError:\r\n 
conn.request(\"GET\", process['req_path'] + \"?\" +\r\n urllib.parse.urlencode(params), headers=header)\r\n\r\n resp = conn.getresponse()\r\n\r\n if stream:\r\n resp_obj = resp\r\n else:\r\n assert isinstance(resp, object)\r\n resp_obj = resp.read()\r\n return HTTPConnect(resp.status, resp_obj)\r\n else:\r\n return HTTPConnect(404, process)", "def get(self, url, data=None, headers=None, follow_redirects=False):\n if data is not None:\n if isinstance(data, dict):\n data = urlencode(data, True)\n if '?' in url:\n url += '&%s' % data\n else:\n url += '?%s' % data\n return self._fetch(url, 'GET', headers=headers,\n follow_redirects=follow_redirects)", "def do_GET(s):\n s.send_response(200)\n s.send_header(\"Content-type\", \"text/html\")\n s.end_headers()\n s.wfile.write(\"<html><head><title>Title goes here.</title></head>\")\n s.wfile.write(\"<body>\")\n # If someone went to \"http://something.somewhere.net/foo/bar/\",\n # then s.path equals \"/foo/bar/\".\n #s.wfile.write(\"<p>You accessed path: %s</p>\" % s.path)\n\tprint(s.path)\n ans = query_function(s.path)\n\tprint(ans)\n s.wfile.write(ans)\n s.wfile.write(\"</body></html>\")", "def _get_request(self, endpoint, params=None, **kwargs):\n\n return requests.get(self.base_url + endpoint, params, **kwargs)", "def get(self, term=None):\r\n params = base.get_params(None, locals())\r\n\r\n return http.Request('GET', self.get_url(), params), parsers.parse_json", "def do_GET(self):\r\n path = self.path\r\n status_code, res = webServer.handle_get_msg(path)\r\n self.send_response(status_code)\r\n self.end_headers()\r\n self.wfile.write(res.encode())", "def get(path, params=None):\n url = request_url.format(path)\n req = requests.get(url, params=params)\n return req.json()", "def get(self, request):\n pass", "def do_GET(self):\n path = self.path.split('/')\n if len(path) == 3:\n key.key_events(path[2])\n self.send_head()", "def get(self, *args, **kwargs):", "def request(self, method, *path, **data):\n\t\theaders = data.pop('headers', {})\n\t\tversion = data.pop('version', None)\n\t\tjson = data.pop('json', True)\n\t\tpath = urljoin(*path)\n\t\treturn self._request(method, path, version, data, headers, json)", "def do_GET(s):\n APIHandler = API(s.DBConnection)\n APIHandler.handleGET(s.path)\n s.send_response(APIHandler.getResponse())\n s.end_headers()\n s.wfile.write(APIHandler.getData().encode())", "def get(cls, uri):\n return cls._perform_request(uri, 'GET')", "def get(*args, **kwargs):\n\n response = yield from aiohttp.request('GET', *args, **kwargs)\n return (yield from response.read())", "def _request(self, opts, query, query_key='q'):\n params = opts['params']\n params[query_key] = query\n resp = requests.get(opts['url'], params=params, headers=self._headers)\n if not resp.ok:\n raise Exception(\"Server threw an error for: {}\".format(resp.url))\n return resp.json()", "def do_GET(self):\n url_parsed = urlparse.urlparse(self.path)\n path = url_parsed.path.lstrip(\"/\")\n print url_parsed, path\n #import pdb; pdb.set_trace()\n if path == '':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(self.page)\n elif path == 'search':\n query = urlparse.parse_qs(url_parsed.query)\n keyword = query.setdefault(\"keyword\", \"python\")\n tqx = dict([q.split(':') for q in query['tqx'][0].split(';')])\n \n self.send_response(200)\n self.send_header(\"Content-type\", \"text/plain\")\n self.end_headers()\n data_table = get_datatable(keyword)\n content = 
data_table.ToJSonResponse(req_id=int(tqx['reqId']))\n self.wfile.write(content)", "def get(url_ext, query_params={}, custom_err=None, timeout=DEFAULT_TIMEOUT):\r\n url = get_url() + url_ext\r\n # get request headers\r\n headers = get_headers()\r\n\r\n r = requests.get(url, params=query_params, headers=headers, timeout=timeout)\r\n return handle_response(r, \"GET\", custom_err)", "def get(self, path=None, ref=None):\r\n params = base.get_params(('ref', ), locals())\r\n url = self.get_url()\r\n\r\n if path:\r\n url = '{0}/{1}'.format(url, path)\r\n\r\n return http.Request('GET', url, params), parsers.parse_json", "def simple_get(self, url):\r\n \"\"\"\r\n The simple_get function accepts a single url argument. \r\n It then makes a GET request to that url. \r\n If nothing goes wrong, you end up with the raw HTML content for the page you requested. \r\n If there were any problems with your request (like the url is bad or the remote server is down) \r\n then your functon returns None.\r\n \"\"\"\r\n try:\r\n with closing(get(url, stream=True)) as resp:\r\n if self.is_good_response(resp):\r\n return resp.content\r\n else:\r\n return None\r\n except RequestException as e:\r\n self.log_error('Error during requests to {0} : {1}'.format(url, str(e)))\r\n return None", "def Get(self, request, global_params=None):\n config = self.GetMethodConfig('Get')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Get(self, request, global_params=None):\n config = self.GetMethodConfig('Get')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Get(self, request, global_params=None):\n config = self.GetMethodConfig('Get')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Get(self, request, global_params=None):\n config = self.GetMethodConfig('Get')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Get(self, request, global_params=None):\n config = self.GetMethodConfig('Get')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Get(self, request, global_params=None):\n config = self.GetMethodConfig('Get')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Get(self, request, global_params=None):\n config = self.GetMethodConfig('Get')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Get(self, request, global_params=None):\n config = self.GetMethodConfig('Get')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Get(self, request, global_params=None):\n config = self.GetMethodConfig('Get')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Get(self, request, global_params=None):\n config = self.GetMethodConfig('Get')\n return self._RunMethod(\n config, request, global_params=global_params)" ]
[ "0.80039424", "0.80005825", "0.78939396", "0.78344136", "0.78143454", "0.7742659", "0.77352524", "0.7651303", "0.76458156", "0.7521482", "0.7480341", "0.7438129", "0.73957163", "0.7381664", "0.73425716", "0.7260178", "0.724971", "0.72437644", "0.72252953", "0.71955794", "0.7190132", "0.7184645", "0.7179268", "0.7171996", "0.71577615", "0.7144797", "0.71444863", "0.71317476", "0.7124505", "0.71055603", "0.71055603", "0.70983684", "0.70757496", "0.7037828", "0.703394", "0.7017583", "0.70137817", "0.70097274", "0.70073533", "0.7006722", "0.69973654", "0.6988345", "0.6956922", "0.6948163", "0.69191295", "0.6907675", "0.69063604", "0.6901707", "0.689342", "0.6884994", "0.6879331", "0.6852773", "0.68508387", "0.68417054", "0.6832082", "0.68309134", "0.68209237", "0.68098307", "0.6796663", "0.6765421", "0.6760635", "0.67577285", "0.67479146", "0.67479146", "0.67479146", "0.67479146", "0.67479146", "0.67479146", "0.6745514", "0.6744239", "0.67370164", "0.67266697", "0.6714714", "0.67127025", "0.6707435", "0.670064", "0.6699635", "0.66967934", "0.6693872", "0.66898733", "0.66857046", "0.6680555", "0.6677598", "0.66726625", "0.6646524", "0.66386694", "0.66376954", "0.6635129", "0.66286504", "0.66286105", "0.6610999", "0.6607556", "0.6607556", "0.6607556", "0.6607556", "0.6607556", "0.6607556", "0.6607556", "0.6607556", "0.6607556", "0.6607556" ]
0.0
-1
Makes a standard POST request.
async def _post_request(self, url, data): # Request the specific URL async with self.session.post(url, headers=self.headers, data=data) as resp: # Finally return the response return await resp.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _post(self, *args, **kwargs):\n return self._request('post', *args, **kwargs)", "def post(self, *args, **kwargs):\n return self._requests_call(util.requests_post, *args, **kwargs)", "def do_POST(self,):\n self.http_method = 'POST'\n self.response()", "def http_method_post():\n return 'POST'", "def post(self, *args, **kwargs):\n self.request(\"post\", *args, **kwargs)", "def post(self, *path, **data):\n\t\treturn self.request('POST', *path, **data)", "def http_post(self, **kwargs):\n return self.rabjcallable.post(**kwargs)", "def post(self, *args, **kw):\n kw['method'] = 'POST'\n return self.open(*args, **kw)", "def httpPost(self, url='', data='', params={}, headers={}):\n\n return self.httpRequest('POST', url, data, params, headers)", "def test_post(self):\n return self.doRequest(self.url, method=\"POST\", body=self.input)", "def _post(self, url, data=None):\n if data is not None:\n data = urllib.urlencode(data)\n return self._request(url, method='POST', payload=data)", "def post(self, *args, **kwargs):\n return self._hit(\"POST\", *args, **kwargs)", "def post(self, path, **post_args):\n return self.request(path, data=post_args, method='POST')", "def do_POST(self):\r\n self._send_handler_response('POST')", "def make_post_request(client, endpoint, data):\n return client.post(endpoint, data=data)", "def httpPost(self, url, post_parameters=None):\r\n return self.auth.post(url, post_parameters)", "def post(self, *args, **kwargs):\n headers = self.post_headers\n headers.update(kwargs.get('headers', {}))\n kwargs['headers'] = headers\n return self._request('post', *args, **kwargs)", "def simulate_post(app, path, **kwargs) -> _ResultBase:\n return simulate_request(app, 'POST', path, **kwargs)", "def make_post_request(self, url, data):\n auth = (self.AUTH_ID, self.AUTH_TOKEN)\n headers = {'content-type': 'application/json'}\n return requests.post(url, data=data, auth=auth, headers=headers)", "def _post(self, url, **kwargs):\n return self._call('POST', url, kwargs)", "def post(self, url, body=None, headers=None, serialize=True):\n return self._request('POST', url, body, headers, serialize)", "def post(url, data=None, json=None, **kwargs):\n\n return request('post', url, data=data, json=json, **kwargs)", "def post(url, to_error=_default_to_error, data=None, json=None, **kwargs):\n\n return request('post',\n url, to_error=to_error, data=data, json=json, **kwargs)", "def _post_request(url, params):\n data = dumps(params).encode(\"utf-8\")\n request = requests.post(url, data=data)\n return request", "def _post(self, request_obj):\n return self._execute_action(request_obj, 'POST')", "def test_client_can_do_post_request(self):\n response = self.httpbin_4.test_requests_post_method()\n self.assertEqual(response.request.method, 'POST')\n self.assertEqual(response.status_code, 200)", "def simulate_post(self, path='/', **kwargs):\n return self.simulate_request('POST', path, **kwargs)", "def _createPostRequest(self, postBody: dict) -> object:\n request = HttpRequest()\n request.method = \"POST\"\n for name,value in postBody.items():\n request.POST[name]= value\n return request", "def post():\n pass", "def raw_post(\n self, uri: str, data: Optional[Dict] = None, json: Optional[Dict] = None, **kwargs\n ) -> requests.Response:\n return self.session.post(url=self._url(uri), data=data, json=json, **kwargs)", "def send_post(url):\n HEADERS['accept'] = 'application/vnd.yang.data+json'\n if not url.startswith('/'):\n url = \"/{}\".format(url)\n url = BASE_URL + url\n resp = requests.post(url, headers=HEADERS)\n 
return resp", "def post(self, data):\n return requests.post(self.url, headers=self.headers, data=data)", "def _post(self, path=\"\", body=None, **kwargs):\n uri = force_json(self.uri + path)\n return self.client.request(uri, method=\"POST\", d=body, **kwargs)", "def api_post(self, path, data):\n return self._api_request(path, 'POST', data)", "def post(self, path: str, params: dict) -> dict:\n return self.request(\"POST\", path, params)", "def apost(url, **kwargs):\n return requests.post(url, **kwargs)", "def make_post_request(url:str, post_params:dict, **kwargs):\n\n print(\"Making call to '{}'...\".format(url))\n resp = requests.post(url, data=post_params, **kwargs)\n print(\"Received response.\")\n\n if not resp.ok:\n return False, resp.status_code, json.loads(resp.content)\n\n return True, resp.status_code, json.loads(resp.content)", "def __post(self, url, payload=None, headers=None):\n if headers is None:\n headers = {\"Content-Type\": \"application/json\"}\n return self.__req(url, \"POST\", body=payload, headers=headers)", "async def post(self, path, params=None, json_data=None):\n response = await self.request('POST', path, params, json_data)\n return response", "def do_POST(self):\r\n self.do_GET()", "def post(self, url_or_path, *args, **kwargs):\n return self.request.post(url_or_path, *args, **kwargs).json()", "def post(self):\n data = request.json\n return save_new_post(data=data)", "def post(self):\n code, status = run_handlers.handle_data_post(self.request.headers, self.request.body)\n self.set_status(code)\n self.write(status)\n self.finish()", "def simulate_post(self, path='/', **kwargs) -> _ResultBase:\n return self.simulate_request('POST', path, **kwargs)", "def PostRequest(self):\n if self.__Payload: \n self.__Answer = requests.post(self.__URL, data = self.__Payload, headers = self.__Headers)\n Logs.WriteToLog(\"Data transited to web server\")\n else:\n Logs.WriteToLog(\"No payload in HTTP request\")\n raise Exception(\"Payload must be setted\")", "def send_post(self, uri, data=None, ofile=None, ojson=None):\n return self.__send_request('POST', uri=uri, data=data, ofile=ofile, ojson=ojson)", "def post_form(url, headers, payload):\n\n headers['Content-Type'] = 'application/x-www-form-urlencoded'\n\n return RestClient.make_post_request(url, headers=headers, data=payload)", "def do_POST(self):\n self._try_to_process_request(self._handle_post_request)", "def post(self):\n created = post_tool(request.json)\n return created, 201", "async def simulate_post(self, path='/', **kwargs) -> _ResultBase:\n return await self.simulate_request('POST', path, **kwargs)", "def _post(self, path, data=None):\n headers = {'content-type': 'application/json'}\n if data:\n data = json.dumps(data)\n r = requests.post(self._url(path), data=data, headers=headers)\n assert r.status_code == 200\n return r", "def post_action(self, path, data=None):\n response = self._request(\"POST\", path, urllib.urlencode(data) if data else None)\n return self._handle_response(response)", "def post(self):\n self.get_or_post(method='POST')", "def post(self, path, req = None, **kwargs):\n req = req or []\n return self.route(path, req=req+[filter_method(['POST'])], **kwargs)", "def post(self, *args, **kwargs):\n return self.handle_post_request()", "def sr_post(self, route_or_uri, data, params=None, raw_response=False, **kwargs):\n op = lambda r: self.post(r, json=data)\n return self.__req(\n route_or_uri,\n params=params,\n query={},\n op=op,\n raw_response=raw_response,\n **kwargs,\n )", "def Post(Path: str, Params: Any = 
None, Data: Any = None, *, Callback: Callable[[int, Dict[str, Any]], None]) -> None:\n Request(\"POST\", Path, Params, Data, Callback=Callback)", "def post(self, url, data=None):\r\n response = self.requestHelper.post(url, data=data)\r\n return self.process(response)", "def _post(self, url, data=None):\n url = urljoin(self.base_url, url)\n try:\n r = self._make_request(**dict(\n method='POST',\n url=url,\n json=data,\n auth=self.auth,\n timeout=self.timeout,\n hooks=self.request_hooks,\n headers=self.request_headers\n ))\n except requests.exceptions.RequestException as e:\n raise e\n else:\n if r.status_code >= 400:\n _raise_response_error(r)\n\n if r.status_code == 204:\n return None\n return r.json()", "def post(self, url, *args):\n\n req_method = type(self.client).__name__\n\n if not url.startswith(\"http\"):\n if not url.startswith(\"/\"):\n url = \"/%s\" % url\n url = \"%s%s\" % (self.base, url)\n\n if req_method == \"FlaskClient\":\n self.client.post(url, headers=self.headers, *args)\n\n else:\n self.client.post(url, headers=self.headers, *args)", "def post(self, data):\n req = self._new_http_request()\n req.add_data(self._prepare_data(data))\n\n return self._urllib2.urlopen(req)", "def do_POST(self):\n\n # do this before sending any response as we may raise an exception\n content = self.__get_content(self.__get_post_body())\n\n # send the response back to the client\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/json\")\n self.end_headers()\n self.wfile.write(content)", "def post(self, url, payload={}):\n response = self._make_request(\"POST\", url, payload)\n\n return response", "def do_POST(self): # pylint: disable=invalid-name\n self.handle_request()", "def post(self, uri, body=None, headers=None, auth=False):\n return self.send_request('POST', uri, body, headers, auth)", "def post(self, url, data):\r\n print(f\"POST {url}\")\r\n print(\"data:\")\r\n self.pp.pprint(data)\r\n response = self.session.post(url, data=data)\r\n print(f\"STATUS {response.status_code}\")\r\n self.print_cookies()\r\n return response", "def post(self, method, uri, query_param, request_param, headers, **kwargs):\n raise NotImplementedError", "def post(self):", "def post(self, url, data):\n return self.app.post(get_url(url), data=data, follow_redirects=True)", "def _make_request(self, payload, headers=None):\n pathparts = REQ_PATH.split(b\"/\")\n if pathparts[0] == b\"\":\n pathparts = pathparts[1:]\n dreq = DummyRequest(pathparts)\n dreq.requestHeaders = Headers(headers or {})\n dreq.responseCode = 200 # default to 200\n\n if isinstance(payload, dict):\n payload = json.dumps(payload)\n\n dreq.content = BytesIO(payload.encode())\n dreq.method = \"POST\"\n\n return dreq", "def send_post(url, data, headers, return_output=False):\n req = requests.post(url=url, data=json.dumps(data), headers=headers)\n if return_output:\n return req\n if str(req.status_code).startswith('2'):\n print 'SUCCESS! {0} {1} {2}'.format(req.status_code, req.reason, req.content)\n else:\n print 'FAIL! 
{0} {1} {2}'.format(req.status_code, req.reason, req.content)\n exit(77)", "def test_post_method(self):\n self.getPage('/', method='POST')\n self.assertStatus('200 OK')\n self.assertHeader('Content-Type', 'application/json')", "def post(cls, uri, payload):\n return cls._perform_request(uri, 'POST', payload)", "def post(self, url: str, **kwargs: Dict[str, Any]) -> Any:\n pass", "def post(self, url, body):\n return self._query(url, 'POST', json=body)", "def do_POST(self):\n ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))\n self.body = cgi.FieldStorage(fp=self.rfile,\n headers=self.headers, environ = {'REQUEST_METHOD':'POST'},\n keep_blank_values = 1, strict_parsing = 1)\n # throw away additional data [see bug #427345]\n while select.select([self.rfile._sock], [], [], 0)[0]:\n if not self.rfile._sock.recv(1):\n break\n self.handle_data()", "def post_request(self, path='', data=None, user=None, **kwargs):\n request = self.rf.post(path, data, **kwargs)\n request.user = user or self.super_user\n return request", "def post(self, url, data=None, params=None):\n return self.session.post(url=self.base_url + url, data=data,\n params=params)", "def do_POST(self):\n logger.info(\"Received a POST request: {}\".format(self.path))\n path = self.strip_path()\n if path == SET_OP:\n self.send_response(self.process_set_request())\n elif path == COMMIT_OP:\n self.send_response(self.process_commit_request())\n else:\n logger.error(\"Invalid POST operation {} was received.\".format(path))\n self.send_response(404)", "def do_post_request(self, uri, headers, payload, timeout_ms, stats_config):\n return self._do_request('POST', uri, headers, payload, timeout_ms,\n stats_config)", "def post(self, request):\n pass", "def post(self, **kwargs):\n return self.client.post(\n self.url(), data=json.dumps(kwargs),\n content_type='application/json')", "def post(self, uri, data=None, json=None, **kwargs):\n return self.session.post(uri, data=data, json=json, **kwargs)", "def post(self, data=None, params=None):\n params = self.parameters(additional_parameters=params)\n res = post(self.endpoint_url, data=data, params=params)\n return Response(res)", "def _http_post(self, path, data):\n # Prepare the request path\n if path[0] == '/':\n path = path[1:]\n path = urljoin(self.servlet_path, path)\n\n # Request the end points\n conn = httplib.HTTPConnection(\"localhost\", self.port)\n conn.request(\"POST\", path, data, {\"Content-Type\": \"application/json\"})\n result = conn.getresponse()\n data = result.read()\n conn.close()\n\n # Convert the response to a string\n return result.status, to_str(data)", "def _PostRequest(self, data=None):\n # requests will use about 3 times of data size's memory.\n req = requests.Request(\n 'POST',\n url=self._target_url,\n headers={'Multi-Event': 'True',\n 'Node-ID': str(self.GetNodeID())},\n files=data).prepare()\n clen = int(req.headers.get('Content-Length'))\n # Checks the size of request, and doesn't send if bigger than maximum size.\n if clen > self._max_bytes:\n return (413, 'Request Entity Too Large: The request is bigger '\n 'than %d bytes' % self._max_bytes, clen)\n resp = requests.Session().send(req, timeout=http_common.HTTP_TIMEOUT)\n if resp.headers['Maximum-Bytes']:\n self._max_bytes = int(resp.headers['Maximum-Bytes'])\n return resp.status_code, resp.reason, clen", "def test_post(self):\n url, port = self.server.address\n\n #couple of basic POSTs\n #request parameters\n r = self.client.get(\"http://{0}:{1}/\".format(url, port))\n self.assertEqual(200, 
r.status_code)\n r = self.client.get(\"http://{0}:{1}\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/200\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/400\".format(url, port))\n self.assertEqual(400, r.status_code)", "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "def post(api, url, payload, headers=None, auth=_KERBEROS_AUTH, proxies=None,\n retries=_NUM_OF_RETRIES, timeout=None):\n return call(api, url, 'post', payload=payload,\n headers=headers, auth=auth, proxies=proxies, retries=retries,\n timeout=timeout)", "def post(self, api_path, *args, **kwargs):\n\n\t\treturn self._do_operation(u'post', api_path, *args, **kwargs)", "async def post(self, url, params=None, json_data=None):\n if self._authenticated:\n return await self.__open(url, method=POST, headers=self._head, params=params, json_data=json_data)", "def test_doPost(self) -> None:\n status_code = apicall.doPost(URL, self._browserheader)\n assert status_code != API_SUCCESS", "def post(call,\n headers=None,\n data=None,\n params=None,\n base=cloudBase,\n no_headers=False,\n raw=False,\n **kwargs):\n return _call(method=requests.post,\n call='{0}{1}'.format(base, call),\n headers=headers,\n data=data,\n params=params,\n no_headers=no_headers,\n raw=raw,\n **kwargs)", "def post_request(\n self,\n alias,\n uri,\n data=None,\n json=None,\n params=None,\n headers=None,\n files=None,\n allow_redirects=None,\n timeout=None):\n session = self._cache.switch(alias)\n if not files:\n data = utils.format_data_according_to_header(session, data, headers)\n # XXX workaround to restore library default behaviour. Not needed in new keywords\n redir = True if allow_redirects is None else allow_redirects\n\n response = self._common_request(\n \"post\",\n session,\n uri,\n data=data,\n json=json,\n params=params,\n files=files,\n headers=headers,\n allow_redirects=redir,\n timeout=timeout)\n return response", "def _req_post(self, url: str, data, raw_res: bool = False):\n self._get_cookies()\n if not self._cookies:\n return\n r = reqtry.post(url, cookies=self._cookies, data=data, allow_redirects=False, timeout=(3, 3), tries=3, delay=1,\n backoff=1.5, jitter=(1, 1.5))\n if raw_res:\n return r\n assert r.status_code == 200, f\"Post request: Invalid http status code: {r.status_code}\"\n assert '\"errCode\":0' in r.text, f'Post response with error from server. 
Response: {r.text}'\n return r.text", "def do_POST(s):\n\t\tprint 'a'\n\t\tif s.path == '/execute':\n\t\t\tjson = draw.begin_draw()\n\n\t\ts.send_response(200)\n\t\ts.send_header(\"Content-type\", \"text/json\")\n\t\ts.end_headers()\n\n\t\tjson2 = json.encode('utf-8')\n\t\ts.wfile.write(json2)", "def post(self):\n text = 'HELLO from socnet API Server!'\n return push_to_mattermost(text)", "def _request_post(self, path, method='POST', body=None, headers=None):\n url = '{}{}'.format(self._url_base, path)\n headers = self._headers() if headers is None else headers\n \n response, content = super(DSBaseService, self)._request(url,\n method=method,\n body=str(body).replace(\"'\", '\"'),\n headers=headers)\n if int(response['status']) in (200, 204):\n if content != \"\":\n res_text = json.loads(content)\n else:\n res_text = \"\"\n post_response = {\n 'status': response['status'],\n 'message': 'SUCCESS',\n 'content': []\n }\n post_response['content'].append(res_text)\n return post_response\n else:\n raise RuntimeError('{} responded with status code {}'.format(url, response['status']))", "def post(self):\n pass" ]
[ "0.79926836", "0.7943902", "0.77661425", "0.77203214", "0.7624726", "0.7573087", "0.7498776", "0.74799937", "0.74722564", "0.7380111", "0.7328185", "0.73026544", "0.7286208", "0.728303", "0.7274627", "0.7264573", "0.72540253", "0.721007", "0.7161422", "0.70747304", "0.7067027", "0.7047475", "0.7030847", "0.7017357", "0.6996674", "0.69784725", "0.69651806", "0.6963913", "0.6962339", "0.6955725", "0.6941618", "0.69216317", "0.69180626", "0.6917264", "0.68771654", "0.6867687", "0.6857017", "0.68552834", "0.68468976", "0.68454", "0.6844821", "0.6838857", "0.68382293", "0.68212867", "0.68149537", "0.6798923", "0.67971975", "0.6796933", "0.6794886", "0.6773251", "0.6767571", "0.67628527", "0.675628", "0.6740451", "0.6739031", "0.6702056", "0.66965216", "0.6687498", "0.66379696", "0.659643", "0.6593328", "0.65931904", "0.6575541", "0.6572962", "0.6568192", "0.654971", "0.6538898", "0.653727", "0.65270114", "0.652507", "0.6518012", "0.6516297", "0.65144247", "0.651216", "0.6505315", "0.64846975", "0.64783025", "0.64772207", "0.6469655", "0.6449463", "0.6449434", "0.64409643", "0.6439686", "0.643838", "0.6437904", "0.638643", "0.6378079", "0.6372463", "0.6372463", "0.6372463", "0.636858", "0.6367247", "0.63536423", "0.6352972", "0.63528687", "0.63521296", "0.6352045", "0.63508344", "0.6346515", "0.634136", "0.6339671" ]
0.0
-1
Return `ret_value` `times` times. If generator will receive some value from outside, update `ret_value`
def exercise_gen(ret_val, times):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def counter_wrapper(generator):\n for value in generator:\n yield value", "def counter_wrapper_2(generator):\n yield from generator", "def _mc_gen():\r\n n = 1\r\n while True:\r\n yield n\r\n n += 1", "def random_values():\n while True:\n yield random()", "def Count():\n return CheckForError(lib.Generators_Get_Count())", "def repeat(value: T, times: int) -> List[T]:\n return [value] * times", "def task5(count):\n number_1, number_2 = 1, 1\n for _ in range(count):\n yield number_1\n number_1, number_2 = number_2, number_1 + number_2", "def next(self):\n self.lock.acquire()\n self.count += self.step;\n result = self.count\n self.lock.release()\n return result", "async def async_generator() -> Generator[float, None, None]:\n for _ in range(10):\n await asyncio.sleep(1)\n yield random.random() * 10", "def generator(factor: int, test: typing.Callable[[int], bool],\n start: int) -> typing.Iterator[int]:\n value = start\n while True:\n value = (value * factor) % 2147483647\n if test(value):\n yield value", "def iter_latest_asynchonously(gen_func, timeout = None, empty_value = None, use_forkserver = False, uninitialized_wait = None):\n if use_forkserver:\n from multiprocessing import set_start_method # Only Python 3.X\n set_start_method('forkserver') # On macos this is necessary to start camera in separate thread\n\n m = Manager()\n namespace = m.Namespace()\n\n lock = Lock()\n\n with lock:\n namespace.time_and_data = (-float('inf'), Uninitialized)\n\n p = Process(target=_async_value_setter, args=(gen_func, namespace, lock))\n p.start()\n while True:\n with lock:\n lasttime, item = namespace.time_and_data\n if item is PoisonPill: # The generator has terminated\n break\n elif item is Uninitialized:\n if uninitialized_wait is not None:\n time.sleep(uninitialized_wait)\n continue\n else:\n yield empty_value\n elif timeout is not None and (time.time() - lasttime) > timeout: # Nothing written or nothing recent enough\n yield empty_value\n else:\n yield item", "def test_generator_method(self):\n for i in range(0, 4):\n yield self.try_odd, i", "def repeat(num_times):\n\n def decorator_repeat(func):\n \"\"\"\n defines wrapper_repeat(*args, **kwargs)\n\n :returns: wrapper_repeat\n \"\"\"\n\n @functools.wraps(func)\n def wrapper_repeat(*args, **kwargs):\n \"\"\"\n func(*args, **kwargs) num_times\n\n :return: last return value\n \"\"\"\n for _ in range(num_times):\n value = func(*args, **kwargs)\n return value\n\n return wrapper_repeat\n\n return decorator_repeat", "def generator():\n mygenerator = (x for x in range(3))\n for element in mygenerator:\n print 'poprve = ', element\n\n for element in mygenerator:\n print 'podruhe = ', element", "def random_number_generator(arg1, arg2):\n return 42", "def multiple_gen(modulus):\n count = 1\n while True:\n yield modulus * count\n count += 1", "async def async_generator() -> Generator[float, None, None]:\n\n for i in range(10):\n yield random.random()\n await asyncio.sleep(1)", "def next(self):\n self.attempt += 1\n if self.attempt > self.max_retries:\n raise StopIteration\n return self.slot_duration * random.randint(0, 2 ** self.attempt - 1)", "async def async_generator() -> Generator[float, None, None]:\n for i in range(10):\n yield (random.uniform(0, 10))\n await asyncio.sleep(1)", "def test_generator_continuous():\n RANGE_MAX = 100\n prev_value = RANGE_MAX // 2\n for msg in it.islice(generate_msgs(0, RANGE_MAX), 0, 42):\n curr_value = Message.parse(msg).power\n assert curr_value - prev_value <= 1\n prev_value = curr_value", "def repeat_count(instance, 
args):\r\n count = instance.repeat_count(args)\r\n return count", "def counter():\n for value in range(5):\n yield \"<{}>\".format(value)", "def next ( num = 1 ) :\n return run ( num )", "def counter(self, value: int, /) -> None:", "def generator(self):\n return [None, 1]", "def repeat(fun, n):\n for i in range(n):\n yield fun()", "def timeit_context() -> Generator:\n result = TimeItResult()\n started_time = time.time()\n try:\n yield result\n finally:\n result.time_passed = time.time() - started_time", "def data_repeated(data):\n\n def gen(count):\n for _ in range(count):\n yield data\n\n yield gen", "def __next__(self):\n if self.returned >= len(self):\n raise StopIteration\n else:\n val = self.buffer[self.current]\n self.current = (self.current + 1) % len(self.buffer)\n self.returned += 1\n return val", "def iterate(func, x):\n while True:\n x = func(x)\n yield x", "def _produce_value(self,gen,force=False):\n if hasattr(gen,\"_Dynamic_time_fn\"): \n time_fn = gen._Dynamic_time_fn\n else:\n time_fn = self.time_fn\n \n if time_fn is None:\n value = produce_value(gen)\n gen._Dynamic_last = value\n else:\n \n time = time_fn()\n\n if force or time>gen._Dynamic_time:\n value = produce_value(gen)\n gen._Dynamic_last = value\n gen._Dynamic_time = time\n else:\n value = gen._Dynamic_last\n\n return value", "def times(self):\n \n class IterTimes:\n def __init__(self, st):\n self.c = 0\n self.st = st\n \n def __iter__(self):\n return self\n \n def next(self):\n t = self.st.time(self.c)\n if t == None or self.c == self.st.longitud:\n raise StopIteration\n else:\n self.c += 1\n return t\n \n return IterTimes(self)", "def countdown():\n for i in range(100, 0, -1):\n yield i", "def generator_count(self, gen):\n if len(gen) != 1 or gen.array_form[0][1] < 0:\n raise ValueError(\"gen must be a generator\")\n s = gen.array_form[0]\n return s[1]*sum([abs(i[1]) for i in self.array_form if i[0] == s[0]])", "def generator(self, random, args):\r\n if self.duplicates:\r\n max_count = [self.capacity // item[0] for item in self.items]\r\n return [random.randint(0, m) for m in max_count]\r\n else:\r\n return [random.choice([0, 1]) for _ in range(len(self.items))]", "def repeatedly(func, /, *args, **kwargs):\n func = to_callable(func)\n try:\n while True:\n yield func(*args, **kwargs)\n except StopIteration as e:\n yield from stop_seq(e)", "def seed(self, *args, **kwargs):\n return partial(self.set_return_value, args, kwargs)", "def very_simple():\n yield 1", "def Next():\n return CheckForError(lib.Generators_Get_Next())", "def convergence_processor(self):\n while True:\n rexp = (yield)\n self.converged = True\n self.converged_time = int(rexp.group(2))", "def retry(times):\n return repeat_with_success_at_least(times, 1)", "def run(self) -> int:\n self._times_called += 1\n return self._times_called", "async def run_generator(pos: int):\n LOGGER.info(f\"Final sequence: {[i async for i in sleep_generator(pos)]}\")", "def calls(self, arg=1):\r\n call_counter()\r\n return arg, call_counter.call_count", "def test_generator_scope():\n def inner(val):\n print(\"inner running\")\n return [0, val]\n gen = (a for a in inner(10))\n print(\"generator created\")\n return gen", "def count():\n\n @sinks\n def _dagpype_internal_fn_act(target):\n n = 0\n try:\n while True:\n (yield)\n n += 1\n except GeneratorExit:\n target.send(n)\n target.close()\n\n return _dagpype_internal_fn_act", "def test_returned_num_in_random_nums(self):\n self._setup_random_gen([1.], [42])\n self.assertEqual(self._random_gen.next_num(), 42)", "def 
testExplicitGeneratorUsage(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tx.g(8, 9)\n\t\tc.generator()\n\t\tc.setReturn(10)\n\t\tc.setReturn(11)\n\t\tc.replay()\n\t\tself.failUnless([k for k in x.g(8, 9)] == [10, 11])", "def retrying(func, *retry_args, **retry_kwargs):\n yield retriable(*retry_args, **retry_kwargs)(func)", "def repeat(self, fn, *args, **kwargs):\n return repeat_n_times(self.n, fn, *args, **kwargs)", "def inference_generator(env, storage, pipe, arrival_rate):\n\n global num_clients, trace, last_inf_times, request_times\n for i in itertools.count():\n random_request_time = random.expovariate(arrival_rate)\n cumulative_request_time = last_inf_times + random_request_time\n last_inf_times = cumulative_request_time\n request_times.append(cumulative_request_time)\n yield env.timeout(random_request_time)\n num_clients +=1\n d = {'idx' : num_clients, 'request_time' : env.now}\n pipe.put(d)", "def values(self):\n while True:\n try:\n yield self.value\n except GPIODeviceClosed:\n break", "def repeat_value(value: Any = None, repeat_count: int = None) -> ObservableBase:\n from ..operators.observable.repeat import repeat_value\n return repeat_value(value, repeat_count)", "def id_generator():\n start_value = 0\n while True:\n yield start_value\n start_value += 1", "def value_iteration_returns(problem, epsilon):\n next_returns = {state: 0 for state in problem.states()}\n\n for round_count in count(start=1):\n returns = next_returns\n\n next_returns = {\n state: problem.state_reward(state)\n + problem.discount() * max((\n sum(\n problem.state_action_result_dist(\n state, action\n ).get(next_state, 0) * returns[next_state]\n for next_state in problem.states()\n )\n for action in problem.state_actions(state)\n ), default=0)\n for state in problem.states()\n }\n max_update_size = max(abs(next_returns[state] - returns[state])\n for state in problem.states())\n if max_update_size < epsilon:\n break\n\n return returns, round_count", "def renumber():\n\n counter = itertools.count(1)\n while True:\n yield 's%s'%counter.next()", "def shotgenerator():\n return random.randint(0, 9), random.randint(0, 9)", "def repeat(times, intensive_times=None):\n if intensive_times is None:\n return repeat_with_success_at_least(times, times)\n\n casual_test = bool(int(os.environ.get('CUPY_TEST_CASUAL', '0')))\n times_ = times if casual_test else intensive_times\n return repeat_with_success_at_least(times_, times_)", "def test_repeated_simuations_with_fixed_seed(self):\n random.seed(175203)\n expected_results = {-1: 5, 0: 36, 1: 43, 2: 16}\n self._setup_random_gen([0.01, 0.3, 0.58, 0.1, 0.01], [-1, 0, 1, 2, 3])\n\n simulation_results = Counter()\n for _ in range(100):\n simulation_results[self._random_gen.next_num()] += 1\n\n self.assertDictEqual(simulation_results, expected_results)", "def test_generator_upward(narrow_power_range):\n with patch('random.randint', side_effect=lambda a,b: 1):\n range_min, range_max = narrow_power_range\n for msg in it.islice(generate_msgs(range_min, range_max), 0, 5):\n pass\n power = Message.parse(msg).power\n assert power == range_max", "def constant_factory(value):\n return repeat(value).next", "def wrapper(*args, **kwargs):\n start = time.time()\n\n return func(*args, **kwargs), int(1000 * (time.time() - start))", "def generate_random_list_or_string(self):\n if self.iteration_deep < self.max_iteration_deep:\n self.iteration_deep += 1\n value = self.generate_random_list(5)\n self.iteration_deep -= 1\n else:\n value = self.generate_random_value(str)\n return value", "def 
section_4_7():\n import itertools\n\n def test1():\n def count(n):\n while True:\n yield n\n n += 1\n\n c = count(0)\n for x in itertools.islice(c, 10, 20):\n print(x)\n\n test1()", "def setNumberOfIterations(self, value):\n return self._set(numberOfIterations=value)", "def setNumberOfIterations(self, value):\n return self._set(numberOfIterations=value)", "def iterate(f, x):\n while True:\n yield x\n x = f(x)", "def simulate(times=100):\r\n conveyor = Conveyor()\r\n conveyor.slots[0].item = Item.random()\r\n for _ in range(times):\r\n conveyor.run()\r\n return len([i for i in conveyor.outputs if i == Item.P])", "def __next__(self):\n self.idx += 1\n if self.idx >= len(self):\n self.cycles += 1\n self.restart()\n if not self.can_recycle():\n raise StopIteration(f\"Error max cycles have been reached for this GSM object. cycles={self.cycles}\")\n # if self.max_cycles >= 0:\n # if self.cycles >= self.max_cycles:\n # raise StopIteration(f\"Error max cycles have been reached for this GSM object. cycles={self.cycles}\")\n return self.state()", "def iteration_count_based(sched, warmup_iterations, iterations, runner, params):\n next_scheduled = 0\n total_iterations = warmup_iterations + iterations\n if total_iterations == 0:\n raise exceptions.RallyAssertionError(\"Operation must run at least for one iteration.\")\n for it in range(0, total_iterations):\n sample_type = metrics.SampleType.Warmup if it < warmup_iterations else metrics.SampleType.Normal\n percent_completed = (it + 1) / total_iterations\n yield (next_scheduled, sample_type, percent_completed, runner, params.params())\n next_scheduled = sched.next(next_scheduled)", "def update():\n global iteration, result\n iteration += 1\n # Stop iterating after max_iterations\n if iteration >= max_iterations:\n timer.stop()\n print \"Output is\", result\n else:\n result = get_next(result)", "def setNIterations(self, value):\n return self._set(nIterations=value)", "def generator(func):\n\n @fn\n @wraps(func)\n def gen(*args, **kwargs):\n return Iter(func(*args, **kwargs))\n\n return gen", "def _get_repeat_times(self):\n # The passed arg on_repeat is used to check that.\n\n # The arg passes 1 in case the --repeat flag is not passed\n # which means we simply need to loop for once.\n\n # The arg passes None in case the --repeat flag is passed but\n # without a value. In this case, we need to make sure the song goes\n # on an infinite loop. 
Though, in our case, we will make the loop run\n # for a really large value like 1000\n\n # The arg passes the number of times the loop is supposed the run in\n # case the value is passed by the user.\n\n if self.on_repeat == 1:\n return 1\n elif self.on_repeat is None:\n logger.info(\"Repeating indefinitely\")\n return 5000\n else:\n logger.info(\n \"Repeating {} {}\".format(\n self.on_repeat, \"time\" if self.on_repeat == 1 else \"times\"\n )\n )\n return self.on_repeat", "async def getrandom_number() :\n\n # run an infinite loop to continue generating random numbers\n while True: \n await asyncio.sleep(2) # let this task sleep for a while\n yield random.randint(0, sys.maxsize) # yield a random int", "def times(self, fn):\n for i in range(0, self._):\n fn()\n return self", "def gen_val(self, num_poses, batch_size):\n noise = self.make_noise(num_poses)\n labels = [1] * num_poses\n self.update_disc_copy()\n rv = self.nested_generator.evaluate(noise, labels,\n batch_size=batch_size)\n self.update_disc_copy()\n return rv", "def setIterations(self, value):\n return self._set(nIterations=value)", "def patched_generator(self, *args, **kwargs):\n self.validate(*args, **kwargs)\n yield from self.function(*args, **kwargs)", "def test_generator_method_name(self):\n for i in range(0, 4):\n yield 'try_odd', i", "def taking(n):\n if n <= 0:\n raise ValueError('taking() requires a positive value.')\n\n @coroutine\n def gen(target):\n for _ in range(n):\n x = (yield)\n target.send(x)\n\n raise StopConsumption()\n\n return gen", "def repeat(self, count):\n return self.Sequence((self,) * count)", "def clock(self):\n\t\tt0 = time.time_ns()\n\t\ttry:\n\t\t\tyield None\n\t\tfinally:\n\t\t\tself._runtime.value += time.time_ns() - t0", "def test_func_generator():\n def test_odd(v):\n assert v % 2\n for i in range(0, 4):\n yield test_odd, i", "def test_GeneratorBuilt(self):\n generator = Mock()\n genFn = Mock(return_value=generator)\n args = range(3)\n kwargs = {'one': 1, 'two': 2, 'three': 3}\n \n wrapper = KaoGenerator(genFn, *args, **kwargs)\n genFn.assert_called_once_with(*args, **kwargs)\n self.assertEqual(wrapper.generator, generator)", "async def sleep_generator(pos: int):\n if pos <= 0:\n raise ValueError(f\"none_zero must be some positive integer, got {pos}\")\n scale = 1 / pos / 10\n for i in range(pos):\n t = scale * (pos - i)\n LOGGER.info(f\"Number {i}, going to sleep {t} s\")\n await asyncio.sleep(t)\n LOGGER.info(f\"Number {i}, slept {t} s\")\n yield i", "def async_lucas():\n yield 2\n a = 2\n b = 1\n while True:\n yield b\n a, b = b, a + b", "def generate_random_dict_or_string(self):\n if self.iteration_deep < self.max_iteration_deep:\n self.iteration_deep += 1\n value = self.generate_random_dict(5)\n self.iteration_deep -= 1\n else:\n value = self.generate_random_value(str)\n return value", "def rng() -> int:", "def test_py3_return():\n\n @do\n def py3_generator_with_return():\n yield Effect(Constant(1))\n return 2 # noqa\n\n eff = py3_generator_with_return()\n assert perf(eff) == 2", "def wrap_generator(func):\n\n async def _wrapped(*a, **k):\n r, ret = None, []\n gen = func(*a, **k)\n while True:\n try:\n item = gen.send(r)\n except StopIteration:\n break\n if inspect.isawaitable(item):\n r = await item\n else:\n r = item\n ret.append(r)\n\n if len(ret) == 1:\n return ret.pop()\n return ret\n\n return _wrapped", "def infinite_increment():\n i = 0\n while 1:\n yield i\n i += 1", "def e_seq():\n yield 2;\n for n in count(2, 2):\n yield 1\n yield n\n yield 1", "def next(self, x):\n 
self.next_called_n_times += 1\n return SequentialTaskCollection.next(self, x)", "def test_random_generator(self):\n gen = random_data()\n data = [next(gen) for _ in range(100)]\n self.assertEqual(len(data), 100)", "def __next__(self):\n self.n += 2\n if self.n > self.container.maximum:\n raise StopIteration\n return self.n", "def __next__(self):\n self.n += 2\n if self.n > self.container.maximum:\n raise StopIteration\n return self.n", "def customer_generator(env, inventory_stock):\n for i in itertools.count():\n yield env.timeout(random.randint(*T_INTER))\n env.process(customer(env, inventory_stock, 'Customer_'+str(i+1)))", "def test_sum_of_yields(n):\n x = 0\n x += yield (0, x)\n x += yield (0, x)\n yield (1, x)", "def for_loop(num_iters, body, initial_args):\n for i in range(num_iters):\n if i == 0:\n outputs = body(*initial_args)\n else:\n outputs = body(*outputs)\n return outputs" ]
[ "0.6483438", "0.6218066", "0.6070627", "0.60574543", "0.5988066", "0.58443666", "0.5823438", "0.57975304", "0.5627555", "0.5625004", "0.5599266", "0.5565379", "0.55576694", "0.5519748", "0.5512081", "0.5501229", "0.54756796", "0.5460212", "0.5435833", "0.54218477", "0.54203224", "0.5413908", "0.5413298", "0.53569335", "0.53565973", "0.5334986", "0.533437", "0.5331532", "0.53177315", "0.5306439", "0.52978253", "0.5264267", "0.5260488", "0.5250435", "0.5239843", "0.5238961", "0.5230391", "0.52299124", "0.5227389", "0.5221168", "0.5218522", "0.5206297", "0.51948553", "0.5189009", "0.51748586", "0.51674014", "0.5164125", "0.5161875", "0.5154856", "0.515122", "0.5141839", "0.51362586", "0.5132781", "0.5132254", "0.51088005", "0.50908613", "0.50896394", "0.5089506", "0.5085219", "0.50845265", "0.50830024", "0.5079268", "0.50462043", "0.5042603", "0.50374573", "0.50374573", "0.50263476", "0.50163305", "0.501067", "0.50069785", "0.5005259", "0.49921814", "0.4988495", "0.49868098", "0.49819767", "0.4976829", "0.4975075", "0.49745515", "0.49709624", "0.49435824", "0.4935095", "0.4927888", "0.49266624", "0.49259943", "0.49254367", "0.49116373", "0.49105492", "0.49099195", "0.4908878", "0.4903932", "0.4902029", "0.48994842", "0.48908296", "0.4888442", "0.48864695", "0.48861364", "0.48861364", "0.4880838", "0.4878766", "0.4878096" ]
0.7192209
0
Update `exercise_gen`, so it will ignore all exceptions
def exercise2():
    g1 = exercise_gen("I'll ignore errors", 300)
    assert next(g1) == "I'll ignore errors"
    assert g1.send('new val') == 'new val'
    assert g1.throw(Exception) == 'new val'
    assert next(g1) == 'new val'
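For illustration, a minimal sketch of an exercise_gen that would satisfy exercise2 above (an assumed implementation, not part of the dataset row): it keeps yielding its current value, adopts any value passed in via send(), and swallows any exception thrown into it.

def exercise_gen(ret_val, times):
    # Yield the current value up to `times` times.
    # A value received via send() replaces the current value;
    # an exception thrown into the generator is ignored and the
    # current value is yielded again on the next resume.
    for _ in range(times):
        try:
            sent = yield ret_val
        except Exception:
            continue
        if sent is not None:
            ret_val = sent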
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exercise_gen(ret_val, times):", "def experiment3():\n raise FAKE_ERROR", "def test_post_codegen_error_query(self):\n with tempfile.TemporaryDirectory() as tmpdirname:\n translator = AstUprootTranslator()\n with pytest.raises(GenerateCodeException):\n translator.generate_code(\"\", cache_path=tmpdirname)", "def add_exercise(self):\r\n\r\n # Take the exercise entires from TOML file\r\n entries = self.cfg.get(\"payload\",{}).get(\"exercise\")\r\n # Check for valid entires\r\n if entries:\r\n # Construct payload \r\n for payload in entries:\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/exercise.json', test=payload)\r\n # Post request\r\n requests.post(API.url_exercise, data = payload, headers = self.headers, timeout = 2)", "def testgen(self):\n self.parse()\n self.generate()", "def test_generate_all_training(self):\n facade = ChatetteFacade.get_or_create()\n\n input_dir_path = \"tests/system-testing/inputs/generate-all/\"\n input_filenames = [\n \"simplest.chatette\", \"only-words.chatette\",\n \"words-and-groups.chatette\", \"alias.chatette\", \"include.chatette\",\n \"slot.chatette\", \"slotrolegroup.chatette\"\n ]\n for filename in input_filenames:\n file_path = os.path.join(input_dir_path, filename)\n facade.run(file_path)\n if not TestSystem.check_no_duplicates(facade.train_examples):\n pytest.fail(\n \"Some examples were generated several times \" +\n \"when dealing with file '\" + filename + \"'.\\nGenerated: \" + \\\n str(facade.train_examples)\n )\n legal_examples = TestSystem.get_legal_examples(file_path)\n for ex in facade.train_examples:\n formatted_ex = {\"intent\": ex.intent_name, \"text\": ex.text}\n if formatted_ex not in legal_examples:\n pytest.fail(\n str(formatted_ex) + \" is not a legal example for '\" + \\\n file_path + \"'\"\n )\n if len(legal_examples) != len(facade.train_examples):\n training_texts = [ex.text for ex in facade.train_examples]\n for legal_ex in legal_examples:\n if legal_ex[\"text\"] not in training_texts:\n pytest.fail(\n \"Example '\" + legal_ex[\"text\"] + \\\n \"' was not generated.\"\n )\n pytest.fail(\n \"An unknown example was not generated (\" + \\\n str(len(facade.train_examples)) + \\\n \" generated instead of \" + str(len(legal_examples)) + \\\n \").\\nGenerated: \" + str(facade.train_examples)\n )\n legal_syn = TestSystem.get_legal_synonyms(file_path)\n if legal_syn is not None:\n synonyms = AST.get_or_create().get_entities_synonyms()\n for key in synonyms:\n if key not in legal_syn:\n pytest.fail(\n \"'\" + key + \"' shouldn't have any synonyms.\"\n )\n for syn in synonyms[key]:\n if syn not in legal_syn[key]:\n pytest.fail(\n \"'\" + syn + \"' shouldn't be a synonym of '\" + \\\n key + \"'\"\n )", "def testExplicitGeneratorConvenienceFunctionExceptionUsage(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tc.generator(x.g(8, 9), [10], Exception(\"bogus\"))\n\t\tc.replay()\n\t\tg = x.g(8, 9)\n\t\tself.failUnless(g.next() == 10)\n\t\tself.failUnlessRaises(Exception, g.next)", "def test_incorrect_prediction_key(self):\n self._config['Prediction key'] = 'wrong_key'\n with self.assertRaisesRegex(ValueError, 'Invalid prediction key'):\n self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)", "def testExplicitGeneratorExecptionUsage(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tx.g(8, 9)\n\t\tc.generator()\n\t\tc.setReturn(10)\n\t\tc.setException(Exception(\"bogus\"))\n\t\tc.replay()\n\t\tg = x.g(8, 9)\n\t\tself.failUnless(g.next() == 
10)\n\t\tself.failUnlessRaises(Exception, g.next)", "def test_create_unexpected_problem(self):\n pass", "def test_no_model(self):\n\n with self.assertRaisesRegex(ValueError,\n 'Please provide a model for this generator'):\n self._gen.generate(\n example=self._example,\n model=None,\n dataset=self._dataset,\n config=self._config)", "def generate(self, **kwargs):\n yield NotImplementedError", "def test_invalidate_error():\n \n test_object = fa.read_in_envision(data_csv=plate_2_repeat, platemap_csv=plate_map_file, data_type='plate', size=384)\n test_object.invalidate() # execute the invalidate function without specifying well ids, rows or columns to be invalidated", "def add_exercise( self, exercise ):\n self.exercises.append( exercise )", "def test_generate_missing(pytester):\n pytester.makefile(\n \".feature\",\n generation=textwrap.dedent(\n \"\"\"\\\n Feature: Missing code generation\n\n Background:\n Given I have a foobar\n\n Scenario: Scenario tests which are already bound to the tests stay as is\n Given I have a bar\n\n\n Scenario: Code is generated for scenarios which are not bound to any tests\n Given I have a bar\n\n\n Scenario: Code is generated for scenario steps which are not yet defined(implemented)\n Given I have a custom bar\n \"\"\"\n ),\n )\n\n pytester.makepyfile(\n textwrap.dedent(\n \"\"\"\\\n import functools\n\n from pytest_bdd import scenario, given\n\n scenario = functools.partial(scenario, \"generation.feature\")\n\n @given(\"I have a bar\")\n def _():\n return \"bar\"\n\n @scenario(\"Scenario tests which are already bound to the tests stay as is\")\n def test_foo():\n pass\n\n @scenario(\"Code is generated for scenario steps which are not yet defined(implemented)\")\n def test_missing_steps():\n pass\n \"\"\"\n )\n )\n\n result = pytester.runpytest(\"--generate-missing\", \"--feature\", \"generation.feature\")\n result.assert_outcomes(passed=0, failed=0, errors=0)\n assert not result.stderr.str()\n assert result.ret == 0\n\n result.stdout.fnmatch_lines(\n ['Scenario \"Code is generated for scenarios which are not bound to any tests\" is not bound to any test *']\n )\n\n result.stdout.fnmatch_lines(\n [\n 'Step Given \"I have a custom bar\" is not defined in the scenario '\n '\"Code is generated for scenario steps which are not yet defined(implemented)\" *'\n ]\n )\n\n result.stdout.fnmatch_lines(\n ['Step Given \"I have a foobar\" is not defined in the background of the feature \"Missing code generation\" *']\n )\n\n result.stdout.fnmatch_lines([\"Please place the code above to the test file(s):\"])", "def source_exercise_target(self, node):\n std_domain = self.builder.env.domains['std']\n figtype = std_domain.get_enumerable_node_type(node.parent)\n assert figtype == 'solution'\n\n fig_id = node.parent['ids'][0]\n\n # sort out the label\n exercise_label = node.parent.attributes['exercise']\n\n names = node.parent['names']\n assert len(names) == 1\n assert names[0].startswith('sol:')\n\n # get exercise id\n assert fig_id.startswith('sol-')\n exercise_id = 'ex-{}'.format(fig_id[4:])\n assert exercise_id == nodes.make_id(exercise_label)\n\n # because the exercise may be in a different document, we go global\n all_labels = std_domain.data['labels']\n assert exercise_label in all_labels\n\n # track down the document and identifier\n exercise_source_docname = all_labels[exercise_label][0]\n fig_identifiers = self.builder.env.toc_fignumbers\n assert exercise_source_docname in fig_identifiers\n assert 'exercise' in fig_identifiers[exercise_source_docname]\n ex_docname_map = 
fig_identifiers[exercise_source_docname]['exercise']\n assert exercise_id in ex_docname_map\n\n fignumber = ex_docname_map[exercise_id]\n\n return exercise_source_docname, exercise_id, fignumber", "def rollback(self):\n\t\traise GeneratorException(\"Not implemented\")", "def _sample_seed(self):\n raise Exception(\" not implemented in base model\")", "def new_exercise():\n db = get_db()\n users = db.users\n exercises = db.exercises\n data = request.json\n \n expected_fields = ['name', 'pic_urls', 'instructions', 'created_by']\n # If the feilds in data don't match the expected fields\n if not set(expected_fields) == set(data):\n raise APIException(status_code=400, message='data does not match the expected fields')\n if not ( isinstance(data['name'], str) and isinstance(data['instructions'], str)\n and isinstance(data['created_by'], str) and isinstance(data['pic_urls'], list) ):\n raise APIException(status_code=400, message='name, created_by, and instructions must be strings')\n\n for pic in data['pic_urls']:\n if not isinstance(pic, str):\n raise APIException(status_code=400, message='each pic_url must be a string')\n\n # Check if created_by is an existing user\n cursor = users.find({\"user_id\": data['created_by']})\n if cursor.count() is 0:\n raise APIException(status_code=404, message='user_id represented by created_by does not exist')\n elif cursor.count() > 1:\n raise APIException(status_code=500, message='Error, multiple users with same user_id (created_by) exist, which is not allowed')\n \n data['workouts_used_in'] = 0\n\n # Create n grams for exercise to be used in search\n data['ngrams'] = ' '.join(make_ngrams(str(data['name']).lower()))\n\n # Insert the new exercise and return its newly created key\n postid = exercises.insert_one(data)\n\n # Index the exercises in the database to be able to be searched\n exercises.search.create_index(\n [\n ('ngrams', 'text'),\n ],\n name='search_exercises',\n weights={\n 'ngrams': 100\n }\n )\n\n return_data = {\"exercise_id\": str(postid.inserted_id)}\n return flask.jsonify(**return_data), 200", "def test_generate_nb_training(self):\n facade = ChatetteFacade.get_or_create()\n\n input_dir_path = \\\n \"tests/system-testing/inputs/generate-nb/training-only/\"\n input_filenames = [\n \"only-words.chatette\", \"words-and-groups.chatette\",\n \"alias.chatette\", \"include.chatette\", \"slot.chatette\",\n \"bugfixes/bug-22-slot-position.chatette\"\n ]\n for filename in input_filenames:\n file_path = os.path.join(input_dir_path, filename)\n facade.run(file_path)\n # if not TestSystem.check_no_duplicates(facade.train_examples): # TODO: make sure there are no duplicates in this case\n # pytest.fail(\"Some examples were generated several times \"+\n # \"when dealing with file '\"+filename+\"'.\\n\"+\n # \"Generated: \"+str(facade.train_examples))\n legal_examples = TestSystem.get_legal_examples(file_path)\n for ex in facade.train_examples:\n formatted_ex = {\"intent\": ex.intent_name, \"text\": ex.text}\n if formatted_ex not in legal_examples:\n pytest.fail(\n str(formatted_ex) + \" is not a legal example for '\" + \\\n file_path + \"'\"\n )\n \n legal_syn = TestSystem.get_legal_synonyms(file_path)\n if legal_syn is not None:\n synonyms = AST.get_or_create().get_entities_synonyms()\n for key in synonyms:\n if key not in legal_syn:\n pytest.fail(\n \"'\" + key + \"' shouldn't have any synonyms.\"\n )\n for syn in synonyms[key]:\n if syn not in legal_syn[key]:\n pytest.fail(\n \"'\" + syn + \"' shouldn't be a synonym of '\" + \\\n key + \"'\"\n )\n\n 
filename_zero = \"zero-ex.chatette\"\n file_path = os.path.join(input_dir_path, filename_zero)\n facade.run(file_path)\n if len(facade.train_examples) != 0:\n pytest.fail(\n \"When dealing with file 'zero-ex.chatette', no examples \" + \\\n \"should be generated.\\nGenerated: \" + \\\n str(facade.train_examples)\n )\n\n filename_one = \"one-ex.chatette\"\n file_path = os.path.join(input_dir_path, filename_one)\n facade.run(file_path)\n print(\"TRAIN EX: \" + str(facade.train_examples))\n if len(facade.train_examples) != 1:\n pytest.fail(\n \"When dealing with file 'one-ex.chatette', one examples \" + \\\n \"should be generated.\\nGenerated: \" + \\\n str(facade.train_examples)\n )", "def test_strain_not_in(generate_no_strain_one_file):\n fname = generate_no_strain_one_file\n with pytest.raises(Exception) as f:\n process_files([fname])", "def add_all_exercises(exam_date, path_all, path_collection):\n type_list = [x for x in os.listdir(path_collection) if '.DS_Store' not in x]\n print(type_list)\n for i in range(len(type_list)):\n print('Type: ' + type_list[i])\n os.mkdir(path_all + '/' + type_list[i])\n path_type = path_collection + '/' + type_list[i]\n nb_ex_type = len(os.listdir(path_type)) # indexing file da 0\n for j in range(nb_ex_type):\n chosen_type_yaml = path_type + '/' + type_list[i] + str(j) + '.yaml'\n if j+1>=9:\n path_ex = path_all + '/' + type_list[i] + '/istanza_' + str(j+1)\n else:\n path_ex = path_all + '/' + type_list[i] + '/istanza_0' + str(j+1)\n print(path_ex)\n os.mkdir(path_ex)\n mode1.create_exercise(exam_date, str(j+1), path_ex, chosen_type_yaml)\n #mode2.create_exercise(str(i+1), path_ex, chosen_type_yaml)\n #mode3.create_exercise(str(i+1), path_ex, chosen_type_yaml)\n print('Exercise ' + str(j+1) + ' added')\n return", "def generate_first_problem():\n click.echo(\"No Project Euler files found in the current directory.\")\n generate(1)\n sys.exit()", "def _test_generator(notebook):\n \n def test(self):\n nb, errors = run_notebook(notebook, kernel_name=self.kernel_name)\n \n message = ''\n if len(errors) > 0:\n for error in errors:\n message += '%s: %s\\n' % (error['ename'], error['evalue'])\n for line in error['traceback']:\n message += ' %s\\n' % line\n self.assertEqual(errors, [], message)\n \n return test", "def run(self):\n # NOTE: since this directive has a complementary `solution` directive\n # it may be better to put the two in a separate `exercise` domain\n env = self.state.document.settings.env\n\n # get the user-provided label of the exercise\n label = self.arguments[0]\n assert label.startswith('ex:'), (\n 'The exercise label ({}) must start with the \"ex:\" prefix.'.format(\n label))\n\n if self.content:\n content_string = '\\n'.join(self.content)\n content_list = self.content\n content_offset = self.content_offset\n else:\n content_string = read_exercise(env, label)\n content_list = content_string.split('\\n')\n content_offset = 0\n\n # we do not assign an id to this node (despite it being a prerequisite\n # for assigning it a fignum) as this will happen automatically when\n # a name is assigned to this node\n exercise_content_node = exercise(content_string)\n\n # since the label of the node was not given in the standard docutil\n # manner (via the optional `name` parameter), it needs to be manually\n # assigned to this instance of the exercise directive and processed,\n # i.e., it registers the label with the domain (standard `std` domain\n # in this case); it also checks whether the labels is not duplicated\n self.options['name'] = label\n 
self.add_name(exercise_content_node)\n # these steps ensure that the node created by this directive can be\n # referenced with `ref` and `numref`\n\n # build an empty exercise title, the fignum is injected when building\n # its HTML representation\n exercise_title_node = exercise_title()\n\n # add title to the exercise and process the content\n exercise_content_node += exercise_title_node\n self.state.nested_parse(\n content_list, content_offset, exercise_content_node)\n\n return [exercise_content_node]", "def test_acc(self):\n raise Exception(\" not implemented in base model\")", "def create_exercise(exam_date, num, path_ex_folder, path_yaml):\n global images_to_add\n global REL_PATH_IMAGES\n REL_PATH_IMAGES = 'img_' + exam_date\n images_to_add = []\n path_mode_free = path_ex_folder + '/modo_libero/' # new folder for the considered submission mode\n os.mkdir(path_mode_free)\n exer = read_exercise_yaml(path_yaml) # reading the given yaml\n notebook = nb.v4.new_notebook() # creating the new notebook\n #print(exer['name'])\n if exer['name'] in ('graphs_flow','graphs_trees', 'graphs_planarity','graphs_paths'):\n insert_graph_import(notebook) #required graph import\n insert_no_scroll(notebook) #no scroll of output div\n else:\t\n insert_import_mode_free(notebook) # required import\n insert_start_button(notebook) # start button to run cells with tag 'run_start'\n insert_hide_code(notebook) # hide all code cells\n insert_user_bar_lib(notebook,path_ex_folder) # insert user_bar.py in a code cell\n insert_heading(notebook, exer['title']) # heading with title\n insert_description1(notebook, exer['description1'], exam_date, path_ex_folder) # description 1\n if 'description2' in exer:\n insert_description2(notebook, exer['description2']) # description 2\n insert_tasks(notebook, exer['tasks']) # inserting the several tasks\n if exer['name'] in ('lp_duality', 'lp_interactive', 'lp_modelling', 'lp_two_phases'): # other libraries needed for some types of exercises\n insert_needed_import(notebook, exer['name'])\n if int(num) >= 10: # writing the notebook and saving it in the correct folder\n note_name = 'Esercizio_' + num + '.ipynb'\n prev_folder = 'esercizio_' + num\n else:\n note_name = 'Esercizio_0' + num + '.ipynb'\n prev_folder = 'esercizio_0' + num\n insert_rendition(notebook, note_name)\n nb.write(notebook, note_name)\n os.rename(os.getcwd()+ '/' + note_name, path_mode_free + '/' + note_name)\n os.system(\"jupyter trust \" + path_mode_free + note_name) # signing the notebook in order to make it trusted\n insert_suppl_folders(path_mode_free) # inserting the supplementary folders (i.e., 'allegati', 'img')\n if exer['name'] in ('graphs_flow','graphs_trees', 'graphs_planarity','graphs_paths'):\n insert_graph_folder(path_mode_free)\n if 'tags' in exer:\n e_dict = {'title':exer['title'],'tags':exer['tags'],'tot_points':0,'link':'http://127.0.0.1:8888/notebooks/'+prev_folder+'/modo_libero/' + note_name, 'tasks':exer['tasks']}\n else:\n\t e_dict = {'title':exer['title'],'tags':[],'tot_points':0,'link':'http://127.0.0.1:8888/notebooks/'+prev_folder+'/modo_libero/' + note_name, 'tasks':exer['tasks']}\n return e_dict", "def test_sudoku_solver_handles_garbage_input():\n from sudoku_solver_hard_unique import setup\n with pytest.raises(Exception) as e_info:\n candidates, dicts, square_coords = setup(invalid)\n assert str(e_info.value) == \"Garbage input: 'a' at coord (0, 8), not a valid Sudoku\"", "def unexpectedException(self):", "def test_generate_03_raise_exception(self):\n move = self.get_new_move(3)\n 
form_wizard = Form(self.env['stock.assign.serial'].with_context(\n default_move_id=move.id,\n default_next_serial_number='code-xxx',\n ))\n wiz = form_wizard.save()\n with self.assertRaises(UserError):\n wiz.generate_serial_numbers()\n\n form_wizard.next_serial_count = 0\n # Must raise an exception because `next_serial_count` must be greater than 0.\n with self.assertRaises(ValidationError):\n form_wizard.save()", "def dummy(args):\n\n task_ids = {'1': LossTypes.mse, '2': LossTypes.mse, '3': LossTypes.cross_entropy}\n input_dimension = 5000 # Dimensionality of each training set\n num_inputs_train = 750\n num_inputs_validate = 100\n num_inputs_test = 150\n\n # Training set\n x_train = np.random.random((num_inputs_train, input_dimension))\n y_train = {}\n\n # Validation set\n x_validate = np.random.random((num_inputs_validate, input_dimension))\n y_validate = {}\n\n # Testing set\n x_test = np.random.random((num_inputs_test, input_dimension))\n y_test = {}\n\n for task_id, loss_type in task_ids.iteritems():\n if loss_type is LossTypes.mse:\n y_train[task_id] = np.random.random((num_inputs_train, 1))\n y_validate[task_id] = np.random.random((num_inputs_validate, 1))\n y_test[task_id] = np.random.random((num_inputs_test, 1))\n elif loss_type is LossTypes.cross_entropy:\n # Training labels -- 2-dimensional one-hot vectors for each example.\n labels = np.random.binomial(1, 0.8, num_inputs_train).reshape(1, num_inputs_train)\n y_train[task_id] = convert_to_one_hot(labels)\n\n # Validation labels -- 2-dimensional one-hot vectors for each example.\n labels = np.random.binomial(1, 0.8, num_inputs_validate).reshape(1, num_inputs_validate)\n y_validate[task_id] = convert_to_one_hot(labels)\n\n # Testing labels -- 2-dimensional one-hot vectors for each example.\n labels = np.random.binomial(1, 0.8, num_inputs_test).reshape(1, num_inputs_test)\n y_test[task_id] = convert_to_one_hot(labels)\n\n exp = Experiment(expt_name=\"synthetic\", task_ids=task_ids, x_train=x_train, x_validate=x_validate,\n x_test=x_test, y_train=y_train, y_validate=y_validate, y_test=y_test,\n model_class=LowLevelSharingModel, learning_rate=args.learning_rate,\n batch_size=args.batch_size, num_epochs=args.num_epochs)\n exp.initialize_network()\n exp.train()\n sys.stderr.write(\"Training complete. 
Logs, outputs, and model saved in \" + os.getcwd())", "def run_gen_and_econ(self):\n try:\n super().run_gen_and_econ()\n finally:\n temp_dir = getattr(self, \"_temp_dir\", None)\n if temp_dir is not None:\n temp_dir.cleanup()", "def test_stress_not_in(generate_no_stress_one_file):\n fname = generate_no_stress_one_file\n with pytest.raises(Exception):\n process_files([fname])", "def gen(self):\n raise NotImplementedError(\"(%s).gen\" % self)", "def test_ascii_increment(self):\r\n self.assertRaises(ValueError, convert_fastq, self.fasta_file_path,\r\n self.qual_file_path, ascii_increment=140, output_directory=self.output_dir)\r\n self.assertRaises(ValueError, convert_fastq, self.fasta_file_path,\r\n self.qual_file_path, ascii_increment=10, output_directory=\r\n self.output_dir)", "def test_generation_is_disabled():\n assert not GENERATE", "def test_resume(self, tmp_path):\n test_trainer = pl.Trainer(checkpoint_callback=False, logger=False)\n\n # Error because explicit_log_dir does not exist\n with pytest.raises(NotFoundError):\n exp_manager(\n test_trainer,\n {\n \"exp_dir\": str(tmp_path / \"test_resume\"),\n \"resume_if_exists\": True,\n \"explicit_log_dir\": \"Does_not_exist\",\n },\n )\n\n # Error because checkpoints folder does not exist\n with pytest.raises(NotFoundError):\n exp_manager(test_trainer, {\"resume_if_exists\": True, \"exp_dir\": str(tmp_path / \"test_resume\")})\n\n # No error because we tell exp_manager to ignore notfounderror\n exp_manager(\n test_trainer,\n {\n \"resume_if_exists\": True,\n \"exp_dir\": str(tmp_path / \"test_resume_2\"),\n \"resume_ignore_no_checkpoint\": True,\n },\n )\n\n test_trainer = pl.Trainer(checkpoint_callback=False, logger=False)\n Path(tmp_path / \"test_resume\" / \"default\" / \"version_0\" / \"checkpoints\").mkdir(parents=True)\n # Error because checkpoints do not exist in folder\n with pytest.raises(NotFoundError):\n exp_manager(\n test_trainer,\n {\n \"resume_if_exists\": True,\n \"explicit_log_dir\": str(tmp_path / \"test_resume\" / \"default\" / \"version_0\"),\n },\n )\n\n Path(tmp_path / \"test_resume\" / \"default\" / \"version_0\" / \"checkpoints\" / \"mymodel--end.ckpt\").touch()\n # Error because *end.ckpt is in folder indicating that training has already finished\n with pytest.raises(ValueError):\n exp_manager(\n test_trainer,\n {\n \"resume_if_exists\": True,\n \"explicit_log_dir\": str(tmp_path / \"test_resume\" / \"default\" / \"version_0\"),\n },\n )\n\n Path(tmp_path / \"test_resume\" / \"default\" / \"version_0\" / \"checkpoints\" / \"mymodel--end.ckpt\").unlink()\n Path(tmp_path / \"test_resume\" / \"default\" / \"version_0\" / \"checkpoints\" / \"mymodel--last.ckpt\").touch()\n Path(tmp_path / \"test_resume\" / \"default\" / \"version_0\" / \"checkpoints\" / \"mymodel2--last.ckpt\").touch()\n # Error because multiple *last.ckpt is in folder. 
If more than one, don't know which to restore\n with pytest.raises(ValueError):\n exp_manager(\n test_trainer,\n {\n \"resume_if_exists\": True,\n \"explicit_log_dir\": str(tmp_path / \"test_resume\" / \"default\" / \"version_0\"),\n },\n )\n\n # Finally succeed\n Path(tmp_path / \"test_resume\" / \"default\" / \"version_0\" / \"checkpoints\" / \"mymodel2--last.ckpt\").unlink()\n log_dir = exp_manager(\n test_trainer,\n {\"resume_if_exists\": True, \"explicit_log_dir\": str(tmp_path / \"test_resume\" / \"default\" / \"version_0\")},\n )\n checkpoint = Path(tmp_path / \"test_resume\" / \"default\" / \"version_0\" / \"checkpoints\" / \"mymodel--last.ckpt\")\n assert Path(test_trainer.resume_from_checkpoint).resolve() == checkpoint.resolve()\n\n # Succeed again and make sure that run_0 exists and previous log files were moved\n test_trainer = pl.Trainer(checkpoint_callback=False, logger=False)\n exp_manager(test_trainer, {\"resume_if_exists\": True, \"explicit_log_dir\": str(log_dir)})\n checkpoint = Path(tmp_path / \"test_resume\" / \"default\" / \"version_0\" / \"checkpoints\" / \"mymodel--last.ckpt\")\n assert Path(test_trainer.resume_from_checkpoint).resolve() == checkpoint.resolve()\n prev_run_dir = Path(tmp_path / \"test_resume\" / \"default\" / \"version_0\" / \"run_0\")\n assert prev_run_dir.exists()\n prev_log = Path(tmp_path / \"test_resume\" / \"default\" / \"version_0\" / \"run_0\" / \"lightning_logs.txt\")\n assert prev_log.exists()", "def test_generate_all_testing(self):\n pass", "def test_exDo():\n doizeExDo = doing.doizeExDo\n assert inspect.isgeneratorfunction(doizeExDo)\n assert hasattr(doizeExDo, \"tock\")\n assert hasattr(doizeExDo, \"opts\")\n assert \"states\" in doizeExDo.opts\n assert doizeExDo.opts[\"states\"] == None\n doizeExDo.opts[\"states\"] = []\n\n tymist = tyming.Tymist()\n\n dog = doizeExDo(tymth=tymist.tymen(), tock=doizeExDo.tock, **doizeExDo.opts)\n assert inspect.isgenerator(dog)\n tock = dog.send(None)\n assert tock == 0.0\n tock = dog.send(\"Hello\")\n assert tock == 0.0\n tock = dog.send(\"Hi\")\n assert tock == 0.0\n tock = dog.close()\n assert tock == None\n with pytest.raises(StopIteration):\n tock = dog.send(\"what?\")\n assert doizeExDo.opts[\"states\"] == [State(tyme=0.0, context='enter', feed=0.0, count=0),\n State(tyme=0.0, context='recur', feed='Hello', count=1),\n State(tyme=0.0, context='recur', feed='Hi', count=2),\n State(tyme=0.0, context='close', feed=None, count=3),\n State(tyme=0.0, context='exit', feed=None, count=4)]\n\n doizeExDo.opts[\"states\"] = []\n dog = doizeExDo(tymth=tymist.tymen(), tock=1.0, **doizeExDo.opts)\n assert inspect.isgenerator(dog)\n tock = dog.send(None)\n assert tock == 1.0\n tock = dog.send(\"Hello\")\n assert tock == 1.0\n tock = dog.send(\"Hi\")\n assert tock == 1.0\n tock = dog.close()\n assert tock == None\n with pytest.raises(StopIteration):\n tock = dog.send(\"what?\")\n assert doizeExDo.opts[\"states\"] == [State(tyme=0.0, context='enter', feed=0.0, count=0),\n State(tyme=0.0, context='recur', feed='Hello', count=1),\n State(tyme=0.0, context='recur', feed='Hi', count=2),\n State(tyme=0.0, context='close', feed=None, count=3),\n State(tyme=0.0, context='exit', feed=None, count=4)]\n\n\n doizeExDo.opts[\"states\"] = []\n dog = doizeExDo(tymth=tymist.tymen(), tock=1.0, **doizeExDo.opts)\n assert inspect.isgenerator(dog)\n tock = next(dog)\n assert tock == 1.0\n tock = next(dog)\n assert tock == 1.0\n tock = next(dog)\n assert tock == 1.0\n tock = dog.close()\n assert tock == None\n with 
pytest.raises(StopIteration):\n tock = dog.send(\"what?\")\n assert doizeExDo.opts[\"states\"] == [State(tyme=0.0, context='enter', feed=0.0, count=0),\n State(tyme=0.0, context='recur', feed=None, count=1),\n State(tyme=0.0, context='recur', feed=None, count=2),\n State(tyme=0.0, context='close', feed=None, count=3),\n State(tyme=0.0, context='exit', feed=None, count=4)]\n\n \"\"\"End Test \"\"\"", "def test_given_negative_driver_steps_generate_raise_exception(\n self):\n with self.assertRaises(tf.errors.InvalidArgumentError):\n generator_component.generate_movielens_dataset_for_bigquery(\n project_id=PROJECT_ID,\n raw_data_path=RAW_DATA_PATH,\n batch_size=BATCH_SIZE,\n rank_k=RANK_K,\n num_actions=NUM_ACTIONS,\n driver_steps=-1,\n bigquery_tmp_file=self.bigquery_tmp_file,\n bigquery_dataset_id=BIGQUERY_DATASET_ID,\n bigquery_location=BIGQUERY_LOCATION,\n bigquery_table_id=BIGQUERY_TABLE_ID)", "def test_generator_method(self):\n for i in range(0, 4):\n yield self.try_odd, i", "def test_offensive_degenerate_case(self):\n from parlai.scripts.detect_offensive_language import DetectOffensive\n\n report = DetectOffensive.main(\n task='integration_tests:overfit', safety='all', mutators='degenerate'\n )\n assert report['classifier_offenses%'] == 0\n assert report['exs'] == 4", "def test_setup_solvent_models():\n with mmtools.utils.temporary_directory() as tmp_dir:\n template_script = get_template_script(tmp_dir)\n\n # Setup solvation system and reduce clearance to make test faster.\n template_script['systems']['hydration-system']['solvent1'] = 'PME'\n template_script['solvents']['PME']['clearance'] = '3.0 * angstrom'\n del template_script['experiments']\n\n # Test solvent models.\n for solvent_model in ['tip3p', 'tip4pew', 'tip3pfb', 'tip5p']:\n yaml_script = copy.deepcopy(template_script)\n yaml_script['solvents']['PME']['solvent_model'] = solvent_model\n if solvent_model == 'tip3p' or solvent_model == 'tip4pew':\n solvent_parameters = ['leaprc.water.' + solvent_model]\n else:\n solvent_parameters = ['leaprc.water.tip3p', 'frcmod.' 
+ solvent_model]\n yaml_script['solvents']['PME']['leap']['parameters'] = solvent_parameters\n yaml_script['options']['setup_dir'] = solvent_model\n exp_builder = ExperimentBuilder(yaml_script)\n\n # Infer number of expected atoms per water molecule from model.\n expected_water_n_atoms = int(list(filter(str.isdigit, solvent_model))[0])\n\n # Setup the system and check that water residues have expected number of particles.\n prmtop_filepath = exp_builder._db.get_system('hydration-system')[0].parameters_path\n topology = mdtraj.load_prmtop(prmtop_filepath)\n yield assert_equal, topology.residue(1).n_atoms, expected_water_n_atoms", "def test_validate_inputs_generator_inputs(ctx, generate_eos_inputs):\n value = generate_eos_inputs()\n value['scale_factors'] = []\n assert eos.validate_inputs(value, ctx) is None\n\n value['generator_inputs']['electronic_type'] = 'invalid_value'\n assert \"invalid_value' is not a valid ElectronicType\" in eos.validate_inputs(value, ctx)", "def test_exploding_resources_to_number(ppg2_per_test):\n a = ppg.FileGeneratingJob(\n \"a\", lambda of: of.write_text(\"a\"), resources=ppg.Resources._RaiseInToNumber\n )\n with pytest.raises(ppg.FatalGraphException):\n ppg.run()\n assert \"Not a Resource\" in str(a.exception)", "def main():\n # Read input from file, returns all objects\n objects = read_input()\n for obj in objects:\n try:\n # Generate the objects answer, yields new object\n obj = generate_answer(obj)\n except Exception:\n # If an error might occur that is not covered, catch it here! Continue where left off\n print('ERROR: An unrecoverable error occured during the processing of ' + obj.get(\n 'operation') + '. Continuing...')\n obj['answer'] = 'ERROR'\n\n print(obj) # TODO: Remove before production\n # Generate an output file\n print_output(objects)", "def test_trydoer_throw():\n tymist = tyming.Tymist(tock=0.125)\n doer = TryDoer(tymth=tymist.tymen(), tock=0.25)\n assert doer.tock == 0.25\n assert doer.states == []\n assert tymist.tyme == 0.0\n\n do = doer(tymth=doer.tymth, tock=doer.tock)\n assert inspect.isgenerator(do)\n result = do.send(None)\n assert result == 0.25\n assert doer.states == [State(tyme=0.0, context='enter', feed='Default', count=0)]\n result = do.send(\"Hello\")\n assert result == 0.25\n assert doer.states == [State(tyme=0.0, context='enter', feed='Default', count=0),\n State(tyme=0.0, context='recur', feed='Hello', count=1)]\n\n\n tymist.tick()\n result = do.send(\"Hi\")\n assert result == 0.25\n assert doer.states == [State(tyme=0.0, context='enter', feed='Default', count=0),\n State(tyme=0.0, context='recur', feed='Hello', count=1),\n State(tyme=0.125, context='recur', feed='Hi', count=2)]\n tymist.tick()\n try:\n result = do.throw(ValueError, \"Bad\")\n except ValueError as ex:\n assert ex.args[0] == \"Bad\" # exception alue is thrown value\n assert doer.states == [State(tyme=0.0, context='enter', feed='Default', count=0),\n State(tyme=0.0, context='recur', feed='Hello', count=1),\n State(tyme=0.125, context='recur', feed='Hi', count=2),\n State(tyme=0.25, context='abort', feed='Bad', count=3),\n State(tyme=0.25, context='exit', feed=None, count=4)]\n\n # send after throw\n tymist.tick()\n try:\n result = do.send(\"what?\")\n except StopIteration as ex:\n assert ex.value == None # after throw no StopIteration value\n assert doer.states == [State(tyme=0.0, context='enter', feed='Default', count=0),\n State(tyme=0.0, context='recur', feed='Hello', count=1),\n State(tyme=0.125, context='recur', feed='Hi', count=2),\n State(tyme=0.25, 
context='abort', feed='Bad', count=3),\n State(tyme=0.25, context='exit', feed=None, count=4)]", "def test_pregenerated_model(sub_test, case):\n\n if case.startswith(\"sensi2\"):\n model_name = sub_test + \"_o2\"\n else:\n model_name = sub_test\n\n model_swig_folder = str(\n Path(__file__).parents[2]\n / \"build\"\n / \"tests\"\n / \"cpp\"\n / f\"external_{model_name}-prefix\"\n / \"src\"\n / f\"external_{model_name}-build\"\n / \"swig\"\n )\n\n test_model_module = amici.import_model_module(\n module_name=model_name, module_path=model_swig_folder\n )\n model = test_model_module.getModel()\n solver = model.getSolver()\n amici.readModelDataFromHDF5(\n options_file, model.get(), f\"/{sub_test}/{case}/options\"\n )\n amici.readSolverSettingsFromHDF5(\n options_file, solver.get(), f\"/{sub_test}/{case}/options\"\n )\n\n edata = None\n if \"data\" in expected_results[sub_test][case].keys():\n edata = amici.readSimulationExpData(\n str(expected_results_file), f\"/{sub_test}/{case}/data\", model.get()\n )\n rdata = amici.runAmiciSimulation(model, solver, edata)\n\n check_derivative_opts = dict()\n\n if model_name == \"model_nested_events\":\n check_derivative_opts[\"rtol\"] = 1e-2\n elif model_name == \"model_events\":\n check_derivative_opts[\"atol\"] = 1e-3\n\n if (\n edata\n and solver.getSensitivityMethod()\n and solver.getSensitivityOrder()\n and len(model.getParameterList())\n and not model_name.startswith(\"model_neuron\")\n and not case.endswith(\"byhandpreeq\")\n ):\n check_derivatives(model, solver, edata, **check_derivative_opts)\n\n verify_simulation_opts = dict()\n\n if model_name.startswith(\"model_neuron\"):\n verify_simulation_opts[\"atol\"] = 1e-5\n verify_simulation_opts[\"rtol\"] = 1e-2\n\n if model_name.startswith(\"model_robertson\") and case == \"sensiforwardSPBCG\":\n verify_simulation_opts[\"atol\"] = 1e-3\n verify_simulation_opts[\"rtol\"] = 1e-3\n\n verify_simulation_results(\n rdata, expected_results[sub_test][case][\"results\"], **verify_simulation_opts\n )\n\n if model_name == \"model_steadystate\" and case == \"sensiforwarderrorint\":\n edata = amici.amici.ExpData(model.get())\n\n # Test runAmiciSimulations: ensure running twice\n # with same ExpData yields same results\n if (\n edata\n and model_name != \"model_neuron_o2\"\n and not (model_name == \"model_robertson\" and case == \"sensiforwardSPBCG\")\n ):\n if isinstance(edata, amici.amici.ExpData):\n edatas = [edata, edata]\n else:\n edatas = [edata.get(), edata.get()]\n\n rdatas = amici.runAmiciSimulations(\n model, solver, edatas, num_threads=2, failfast=False\n )\n verify_simulation_results(\n rdatas[0],\n expected_results[sub_test][case][\"results\"],\n **verify_simulation_opts,\n )\n verify_simulation_results(\n rdatas[1],\n expected_results[sub_test][case][\"results\"],\n **verify_simulation_opts,\n )\n\n # test residuals mode\n if solver.getSensitivityMethod() == amici.SensitivityMethod.adjoint:\n with pytest.raises(RuntimeError):\n solver.setReturnDataReportingMode(amici.RDataReporting.residuals)\n else:\n solver.setReturnDataReportingMode(amici.RDataReporting.residuals)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n verify_simulation_results(\n rdata,\n expected_results[sub_test][case][\"results\"],\n fields=[\"t\", \"res\", \"sres\", \"y\", \"sy\", \"sigmay\", \"ssigmay\"],\n **verify_simulation_opts,\n )\n with pytest.raises(RuntimeError):\n solver.setSensitivityMethod(amici.SensitivityMethod.adjoint)\n\n chi2_ref = rdata.chi2\n\n # test likelihood mode\n 
solver.setReturnDataReportingMode(amici.RDataReporting.likelihood)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n verify_simulation_results(\n rdata,\n expected_results[sub_test][case][\"results\"],\n fields=[\"t\", \"llh\", \"sllh\", \"s2llh\", \"FIM\"],\n **verify_simulation_opts,\n )\n\n # test sigma residuals\n\n if (\n model_name == \"model_jakstat_adjoint\"\n and solver.getSensitivityMethod() != amici.SensitivityMethod.adjoint\n ):\n model.setAddSigmaResiduals(True)\n solver.setReturnDataReportingMode(amici.RDataReporting.full)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n # check whether activation changes chi2\n assert chi2_ref != rdata.chi2\n\n if (\n edata\n and solver.getSensitivityMethod()\n and solver.getSensitivityOrder()\n and len(model.getParameterList())\n ):\n check_derivatives(model, solver, edata, **check_derivative_opts)\n\n chi2_ref = rdata.chi2\n res_ref = rdata.res\n\n model.setMinimumSigmaResiduals(100)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n # check whether changing the minimum changes res but not chi2\n assert np.isclose(chi2_ref, rdata.chi2)\n assert not np.allclose(res_ref, rdata.res)\n\n model.setMinimumSigmaResiduals(-10)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n # check whether having a bad minimum results in nan chi2\n assert np.isnan(rdata.chi2)\n\n with pytest.raises(RuntimeError):\n model.getParameterByName(\"thisParameterDoesNotExist\")", "def run_gen_and_econ(self):\n try:\n super().run_gen_and_econ()\n except SAMExecutionError as e:\n logger.error(\"Skipping site {}; received sam error: {}\"\n .format(self._site, str(e)))\n self.outputs = {}", "def test_given_float_driver_steps_generate_raise_exception(\n self):\n with self.assertRaises(TypeError):\n generator_component.generate_movielens_dataset_for_bigquery(\n project_id=PROJECT_ID,\n raw_data_path=RAW_DATA_PATH,\n batch_size=BATCH_SIZE,\n rank_k=RANK_K,\n num_actions=NUM_ACTIONS,\n driver_steps=0.5,\n bigquery_tmp_file=self.bigquery_tmp_file,\n bigquery_dataset_id=BIGQUERY_DATASET_ID,\n bigquery_location=BIGQUERY_LOCATION,\n bigquery_table_id=BIGQUERY_TABLE_ID)", "def skip(problem):\n click.echo(\"Current problem is problem %i.\" % problem)\n generate(problem + 1, prompt_default=False)", "def test_validation_wrong_solvents():\n # Each test case is a pair (regexp_error, solvent_description).\n solvents = [\n (\"nonbonded_cutoff:\\n- can be specified only with the following nonbonded methods \\['CutoffPeriodic', 'CutoffNonPeriodic',\\n 'Ewald', 'PME'\\]\",\n {'nonbonded_cutoff': '3*nanometers'}),\n (\"solvent_model:\\n- unallowed value unknown_solvent_model\",\n {'nonbonded_method': 'PME', 'solvent_model': 'unknown_solvent_model'}),\n (\"leap:\\n- must be of dict type\",\n {'nonbonded_method': 'PME', 'solvent_model': 'tip3p', 'leap': 'leaprc.water.tip3p'}),\n (\"implicit_solvent:\\n- can be specified only if nonbonded method is NoCutoff\",\n {'nonbonded_method': 'PME', 'clearance': '3*angstroms', 'implicit_solvent': 'OBC2'}),\n (\"blabla:\\n- unknown field\",\n {'nonbonded_method': 'NoCutoff', 'blabla': '3*nanometers'}),\n (\"''implicit_solvent'' cannot be coerced: module ''simtk.openmm.app'' has no\\n attribute ''OBX2'''\",\n {'nonbonded_method': 'NoCutoff', 'implicit_solvent': 'OBX2'}),\n (\"''implicit_solvent_salt_conc'' cannot be coerced: Units of 1.0\\*angstrom\",\n {'implicit_solvent': 'OBC2', 'implicit_solvent_salt_conc': '1.0*angstrom'})\n ]\n for regexp, solvent in solvents:\n yield assert_raises_regexp, YamlParseError, regexp, 
ExperimentBuilder._validate_solvents, {'solv': solvent}", "def test_workon_fail(self, monkeypatch):\n\n def foo(x):\n return [dict(name=\"result\", type=\"objective\", value=x * 2)]\n\n def build_fail(*args, **kwargs):\n raise RuntimeError(\"You shall not build!\")\n\n monkeypatch.setattr(\"orion.core.io.experiment_builder.build\", build_fail)\n\n # Flush storage singleton\n\n with pytest.raises(RuntimeError) as exc:\n experiment = workon(\n foo, space={\"x\": \"uniform(0, 10)\"}, max_trials=5, name=\"voici\"\n )\n\n assert exc.match(\"You shall not build!\")\n\n # Now test with a prior storage\n with OrionState(\n storage={\"type\": \"legacy\", \"database\": {\"type\": \"EphemeralDB\"}}\n ) as cfg:\n storage = cfg.storage\n\n with pytest.raises(RuntimeError) as exc:\n workon(foo, space={\"x\": \"uniform(0, 10)\"}, max_trials=5, name=\"voici\")\n\n assert exc.match(\"You shall not build!\")", "def generate_submissons_all_steps():\n\n\n data_en = read_json_file(\"Test_Data/test-en.json\")\n data_pr = read_json_file(\"Test_Data/test-pr.json\")\n data_es = read_json_file(\"Test_Data/test-es.json\")\n res_en = generate_embeddings_sentence_test_data(data_en, \"Test_Data/embd-en.pkl\")\n res_es = generate_embeddings_sentence_test_data(data_es, \"Test_Data/embd-es.pkl\")\n res_pr = generate_embeddings_sentence_test_data(data_pr, \"Test_Data/embd-pr.pkl\")\n model = load_model(\"model_doc\")\n make_submission(res_es, model, \"submission-es\")\n make_submission(res_pr, model, \"submission-pr\")\n make_submission(res_en, model, \"submission-en\")\n exit()", "def test_init(self):\n with self.assertRaises(IncompetentQiitaDeveloperError):\n BaseSample('SKM7.640188', SampleTemplate(1))", "def test_initialization_homework_result_homework_negative():\n with pytest.raises(TypeError):\n lazy_student.do_homework(\"oop_hw\", \"done this\")", "def task_gen(self):\n pass", "def test_pytest_bdd_with_missing_step_implementation(self):\n self.testdir.makefile(\n \".feature\",\n simple=_SIMPLE_SCENARIO,\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenario, given, then, when\n\n @scenario(\"simple.feature\", \"Simple scenario\")\n def test_simple():\n pass\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 4\n assert spans[0].get_tag(ERROR_MSG)", "def test__generate_details_raises_error(self):\n # Setup\n base_property = BaseSingleTableProperty()\n\n # Run and Assert\n with pytest.raises(NotImplementedError):\n base_property._generate_details(None, None, None, None)", "def test_train_test_split_uni_exo(load_uni_exo_data_target):\n data, target = load_uni_exo_data_target\n\n ####################################\n #### Continuous fh without Gaps ####\n ####################################\n\n #### Integer fh ----\n exp = TSForecastingExperiment()\n fh = 12\n exp.setup(data=data, target=target, fh=fh, seasonal_period=4, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.test.index == data.iloc[-fh:].index)\n assert np.all(exp.X.index == data.index)\n assert np.all(exp.y.index == data.index)\n assert np.all(exp.X_train.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.X_test.index == data.iloc[-fh:].index)\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.y_test.index == data.iloc[-fh:].index)\n assert 
np.all(exp.dataset_transformed.index == data.index)\n assert np.all(exp.train_transformed.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.test_transformed.index == data.iloc[-fh:].index)\n assert np.all(exp.X_transformed.index == data.index)\n assert np.all(exp.y_transformed.index == data.index)\n assert np.all(exp.X_train_transformed.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.X_test_transformed.index == data.iloc[-fh:].index)\n assert np.all(exp.y_train_transformed.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.y_test_transformed.index == data.iloc[-fh:].index)\n\n #### Numpy fh ----\n exp = TSForecastingExperiment()\n fh = np.arange(1, 10) # 9 values\n exp.setup(data=data, target=target, fh=fh, seasonal_period=4, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.test.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.X.index == data.index)\n assert np.all(exp.y.index == data.index)\n assert np.all(exp.X_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.X_test.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.y_test.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.test_transformed.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.X_transformed.index == data.index)\n assert np.all(exp.y_transformed.index == data.index)\n assert np.all(\n exp.X_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.X_test_transformed.index == data.iloc[-len(fh) :].index)\n assert np.all(\n exp.y_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.y_test_transformed.index == data.iloc[-len(fh) :].index)\n\n #### List fh ----\n exp = TSForecastingExperiment()\n fh = [1, 2, 3, 4, 5, 6]\n exp.setup(data=data, target=target, fh=fh, seasonal_period=4, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.test.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.X.index == data.index)\n assert np.all(exp.y.index == data.index)\n assert np.all(exp.X_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.X_test.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.y_test.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.test_transformed.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.X_transformed.index == data.index)\n assert np.all(exp.y_transformed.index == data.index)\n assert np.all(\n exp.X_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.X_test_transformed.index == data.iloc[-len(fh) :].index)\n assert np.all(\n exp.y_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.y_test_transformed.index == data.iloc[-len(fh) :].index)\n\n #################################\n #### Continuous fh with Gaps ####\n 
#################################\n\n #### Numpy fh ----\n exp = TSForecastingExperiment()\n fh = np.arange(7, 13) # 6 values\n exp.setup(data=data, target=target, fh=fh, seasonal_period=4, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n # `test`` call still refers to y_test indices and not X_test indices\n assert len(exp.test) == len(fh)\n assert np.all(exp.X.index == data.index)\n assert np.all(exp.y.index == data.index)\n assert np.all(exp.X_train.index == data.iloc[: (len(data) - max(fh))].index)\n # Exogenous variables will not have any gaps (only target has gaps)\n assert np.all(exp.X_test.index == data.iloc[-max(fh) :].index)\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert len(exp.y_test) == len(fh)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.test_transformed) == len(fh)\n assert np.all(exp.X_transformed.index == data.index)\n assert np.all(exp.y_transformed.index == data.index)\n\n #### List fh ----\n exp = TSForecastingExperiment()\n fh = [4, 5, 6]\n exp.setup(data=data, target=target, fh=fh, seasonal_period=4, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n # `test`` call still refers to y_test indices and not X_test indices\n assert len(exp.test) == len(fh)\n assert np.all(exp.X.index == data.index)\n assert np.all(exp.y.index == data.index)\n assert np.all(exp.X_train.index == data.iloc[: (len(data) - max(fh))].index)\n # Exogenous variables will not have any gaps (only target has gaps)\n assert np.all(exp.X_test.index == data.iloc[-max(fh) :].index)\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert len(exp.y_test) == len(fh)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.test_transformed) == len(fh)\n assert np.all(exp.X_transformed.index == data.index)\n assert np.all(exp.y_transformed.index == data.index)\n\n ####################################\n #### Discontinuous fh with Gaps ####\n ####################################\n\n #### Numpy fh ----\n exp = TSForecastingExperiment()\n fh = np.array([4, 5, 6, 10, 11, 12]) # 6 values\n exp.setup(data=data, target=target, fh=fh, seasonal_period=4, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n # `test`` call still refers to y_test indices and not X_test indices\n assert len(exp.test) == len(fh)\n assert np.all(exp.X.index == data.index)\n assert np.all(exp.y.index == data.index)\n assert np.all(exp.X_train.index == data.iloc[: (len(data) - max(fh))].index)\n # Exogenous variables will not have any gaps (only target has gaps)\n assert np.all(exp.X_test.index == data.iloc[-max(fh) :].index)\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert len(exp.y_test) == len(fh)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.test_transformed) == len(fh)\n assert np.all(exp.X_transformed.index == data.index)\n assert np.all(exp.y_transformed.index == data.index)\n assert np.all(\n 
exp.X_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n # Exogenous variables will not have any gaps (only target has gaps)\n assert np.all(exp.X_test_transformed.index == data.iloc[-max(fh) :].index)\n assert np.all(\n exp.y_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.y_test_transformed) == len(fh)\n\n #### List fh ----\n exp = TSForecastingExperiment()\n fh = [4, 5, 6, 10, 11, 12]\n exp.setup(data=data, target=target, fh=fh, seasonal_period=4, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n # `test`` call still refers to y_test indices and not X_test indices\n assert len(exp.test) == len(fh)\n assert np.all(exp.X.index == data.index)\n assert np.all(exp.y.index == data.index)\n assert np.all(exp.X_train.index == data.iloc[: (len(data) - max(fh))].index)\n # Exogenous variables will not have any gaps (only target has gaps)\n assert np.all(exp.X_test.index == data.iloc[-max(fh) :].index)\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert len(exp.y_test) == len(fh)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.test_transformed) == len(fh)\n assert np.all(exp.X_transformed.index == data.index)\n assert np.all(exp.y_transformed.index == data.index)\n assert np.all(\n exp.X_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n # Exogenous variables will not have any gaps (only target has gaps)\n assert np.all(exp.X_test_transformed.index == data.iloc[-max(fh) :].index)\n assert np.all(\n exp.y_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.y_test_transformed) == len(fh)", "def py_exercises(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Exercise(row [1], row [2])\n\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select e.id,\n e.name,\n e.language\n \n from exercises e\n where e.language = \"Python\"\n order by e.language\n \"\"\")\n\n py_exercises = db_cursor.fetchall()\n print('\\n***Python Exercises***')\n for exercise in py_exercises:\n print(exercise)", "def test_reproduce_run_issue():\n # update the following four lines if necessary\n ep_version = \"8-9-0\"\n idffile = \"V8_9/smallfile.idf\"\n iddfile = \"Energy+V8_9_0.idd\"\n epwfile = \"USA_IL_Chicago-OHare.Intl.AP.725300_TMY3.epw\"\n\n _, eplus_home = paths_from_version(ep_version)\n idfname = os.path.join(IDF_FILES, idffile)\n iddfile = os.path.join(IDD_FILES, iddfile)\n epwfile = os.path.join(eplus_home, \"WeatherData\", epwfile)\n modeleditor.IDF.setiddname(iddfile, testing=True)\n idf = IDF(idfname, epwfile)\n # make any changes to the IDF here\n try:\n # Add any additional `run` kwargs here\n # `ep_version` kwarg is required due to buggy test isolation\n idf.run(output_directory=\"test_dir\", ep_version=ep_version)\n # Add any tests for expected/unexpected outputs here\n except Exception:\n # Add any tests for expected/unexpected exceptions here\n raise\n finally:\n shutil.rmtree(\"test_dir\", ignore_errors=True)", "def test_notrunerror(self, MetricClass):\n m = MetricClass()\n with pytest.raises(NotRunError):\n RandomTrader(seed=42).evaluate(m)", "def test_main_incorrect_type():\n with pytest.raises(Exception) as e_info:\n main([\"./excelAddinGenerator\", \"./src/data/xl/styles.xml\", 
\"fail.xlam\"])", "def _generate_input_file(self):\n if self.input_static:\n return\n\n if self._input_generator_name is None:\n logger.error(\"A testcase has neither a generator nor a static input\")\n self.input_generation_log = \"Generation failed. No generator specified.\"\n self.input_generation_successful = False\n elif self._input_generator is None:\n self.input_generation_log = \"Generation failed. Generator {} not found\".format(\n self._input_generator_name,\n )\n self.input_generation_successful = False\n else:\n generation_command = get_execution_command(self._input_generator.source_language, \"generator\")\n generation_command.extend(shlex.split(self._input_generation_parameters))\n stdout_redirect = \"output.txt\"\n\n try:\n generator_compiled = self._input_generator.compiled_file\n except:\n self.input_generation_log = \"Generation failed. Generator didn't compile. Log: {}\".format(\n self._input_generator.last_compile_log\n )\n self.save()\n return\n\n action = ActionDescription(\n commands=[generation_command],\n executables=[(\"generator\", generator_compiled)],\n stdout_redirect=stdout_redirect,\n output_files=[stdout_redirect],\n time_limit=settings.FAILSAFE_TIME_LIMIT,\n memory_limit=settings.FAILSAFE_MEMORY_LIMIT\n )\n success, execution_success, outputs, sandbox_datas = execute_with_input(action)\n if not success:\n logger.error(\"Generating input for testcase {} failed.\\n Sandbox data:\\n{}\".format(\n str(self),\n str(sandbox_datas[0]))\n )\n self.input_generation_log = \\\n \"System failed to generate the input. \" \\\n \"Check the logs for more details. \" \\\n \"This issue must be resolved by a system administrator\"\n self.input_generation_successful = False\n elif not execution_success:\n self.input_generation_log = \"Generation failed. {}.\".format(\n str(sandbox_datas[0])\n )\n self.input_generation_successful = False\n else:\n self._input_generated_file = outputs[stdout_redirect]\n self.input_generation_log = \"Generation successful.\"\n self.input_generation_successful = True\n self.save()", "def add_exercise():\n json_data = request.get_json()\n new_question = json_data.get(\"new_question\")\n new_answer = json_data.get(\"new_answer\")\n user_id = session.get(\"email\")\n try:\n fm.add_exercise(new_question, new_answer, user_id)\n msg = \"Exercise added for user: {}\".format(user_id)\n app.logger.info(msg)\n return jsonify({\"message\": \"add exercise call completed\"})\n except Exception as e:\n msg = \"The question or the answer to be added has exceeded the max char limit\"\n app.logger.error(msg)\n abort(400)", "def test_init_wrong_template(self):\n with self.assertRaises(IncompetentQiitaDeveloperError):\n PrepSample('1.SKB8.640193', SampleTemplate(1))", "def test_illegal_input(self):\n cs = ConfigurationSpace()\n cs.add_hyperparameter(UniformFloatHyperparameter('test', 1, 10, 5))\n scen = Scenario({'run_obj': 'quality', 'cs': cs})\n stats = Stats(scen)\n # Recorded runs but no incumbent.\n stats.ta_runs = 10\n smac = SMAC(scen, stats=stats, rng=np.random.RandomState(42))\n self.output_dirs.append(scen.output_dir)\n self.assertRaises(ValueError, smac.optimize)\n # Incumbent but no recoreded runs.\n incumbent = cs.get_default_configuration()\n smac = SMAC(scen, restore_incumbent=incumbent,\n rng=np.random.RandomState(42))\n self.assertRaises(ValueError, smac.optimize)", "def _generate_output_file(self):\n\n if self.output_static:\n return\n\n if not self.input_file_generated():\n self.output_generation_log = \"Generation failed. 
Input wasn't generated\"\n self.output_generation_successful = False\n else:\n solution = self.solution\n if solution is None:\n self.output_generation_log = \"Generation failed. No model solution specified.\"\n self.output_generation_successful = False\n else:\n problem_code = self.problem.get_judge_code()\n testcase_code = self.get_judge_code()\n judge = self.problem.get_judge()\n task_type = self.problem.get_task_type()\n if solution.language not in judge.get_supported_languages():\n self.output_generation_log = \\\n \"Generation failed. Solution language is not supported by the judge\"\n self.output_generation_successful = False\n else:\n evaluation_result = task_type.generate_output(\n problem_code=problem_code,\n testcase_code=testcase_code,\n language=solution.language,\n solution_file=(solution.name, solution.code),\n )\n if not evaluation_result.success:\n self.output_generation_log = \\\n \"Generation failed. Judge couldn't execute the solution. Details: {}\".format(\n evaluation_result.message\n )\n self.output_generation_successful = False\n elif evaluation_result.verdict != JudgeVerdict.ok:\n self.output_generation_log = \\\n \"Generation failed. Solution exited with verdict {} on the judge\".format(\n str(evaluation_result.verdict.name)\n )\n self.output_generation_successful = False\n else:\n self.output_generation_log = \"Generation successful\"\n self.output_generation_successful = True\n self._output_generated_file = evaluation_result.output_file\n self.save()", "def test_skip_is_valid(self):\n idaa_index = 3\n\n upload_program = program.ProgramUpload(idaa_program=self.idaa_json['value'][idaa_index]['fields'],\n msr_country_codes_list=msr_country_codes_list, msr_gaitid_list=msr_gaitid_list, duplicated_gaitids=self.duplicated_gaitids\n )\n\n self.assertRaises(Exception, upload_program.upload)", "def test_init_wrong_template(self):\n with self.assertRaises(IncompetentQiitaDeveloperError):\n Sample('SKB8.640193', PrepTemplate(1))", "def test_episodenotfound(self):\n self.assertRaises(tvdb_episodenotfound, lambda:self.t['Scrubs'][1][30])", "def test_invalid_game_setup(self):\n with self.assertRaises(ValueError):\n self._game.add_player(self._creator, 1)\n with self.assertRaises(ValueError):\n self._game.add_player(self._users[1], 0)\n for x in xrange(1, 4):\n self._game.add_player(self._users[x], x)\n with self.assertRaises(ValueError):\n self._game.add_player(self._users[4], 1)", "def _generate_examples(self, filepath, split):\r\n if self.config.name == \"trex\":\r\n paths = filepath\r\n relations_path = paths[0]\r\n paths = paths[1:]\r\n all_rels = {}\r\n with open(relations_path, encoding=\"utf-8\") as f:\r\n for row in f:\r\n data = json.loads(row)\r\n all_rels[data[\"relation\"]] = data\r\n id_ = -1\r\n for filepath in paths:\r\n with open(filepath, encoding=\"utf-8\") as f:\r\n for row in f:\r\n data = json.loads(row)\r\n pred = all_rels.get(data[\"predicate_id\"], {})\r\n for evidences in data[\"evidences\"]:\r\n id_ += 1\r\n yield id_, {\r\n \"uuid\": str(data[\"uuid\"]),\r\n \"obj_uri\": str(data[\"obj_uri\"]),\r\n \"obj_label\": str(data[\"obj_label\"]),\r\n \"sub_uri\": str(data[\"sub_uri\"]),\r\n \"sub_label\": str(data[\"sub_label\"]),\r\n \"predicate_id\": str(data[\"predicate_id\"]),\r\n \"sub_surface\": str(evidences[\"sub_surface\"]),\r\n \"obj_surface\": str(evidences[\"obj_surface\"]),\r\n \"masked_sentence\": str(evidences[\"masked_sentence\"]),\r\n \"template\": str(pred.get(\"template\", \"\")),\r\n \"template_negated\": 
str(pred.get(\"template_negated\", \"\")),\r\n \"label\": str(pred.get(\"label\", \"\")),\r\n \"description\": str(pred.get(\"description\", \"\")),\r\n \"type\": str(pred.get(\"type\", \"\")),\r\n }\r\n elif self.config.name == \"conceptnet\":\r\n id_ = -1\r\n with open(filepath, encoding=\"utf-8\") as f:\r\n for row in f:\r\n data = json.loads(row)\r\n if data.get(\"negated\") is not None:\r\n for masked_sentence, negated in zip(data[\"masked_sentences\"], data[\"negated\"]):\r\n id_ += 1\r\n yield id_, {\r\n \"uuid\": str(data[\"uuid\"]),\r\n \"sub\": str(data.get(\"sub\", \"\")),\r\n \"obj\": str(data.get(\"obj\", \"\")),\r\n \"pred\": str(data[\"pred\"]),\r\n \"obj_label\": str(data[\"obj_label\"]),\r\n \"masked_sentence\": str(masked_sentence),\r\n \"negated\": str(negated),\r\n }\r\n else:\r\n for masked_sentence in data[\"masked_sentences\"]:\r\n id_ += 1\r\n yield id_, {\r\n \"uuid\": str(data[\"uuid\"]),\r\n \"sub\": str(data.get(\"sub\", \"\")),\r\n \"obj\": str(data.get(\"obj\", \"\")),\r\n \"pred\": str(data[\"pred\"]),\r\n \"obj_label\": str(data[\"obj_label\"]),\r\n \"masked_sentence\": str(masked_sentence),\r\n \"negated\": str(\"\"),\r\n }\r\n elif self.config.name == \"squad\":\r\n id_ = -1\r\n with open(filepath, encoding=\"utf-8\") as f:\r\n for row in f:\r\n data = json.loads(row)\r\n for masked_sentence in data[\"masked_sentences\"]:\r\n id_ += 1\r\n yield id_, {\r\n \"id\": str(data[\"id\"]),\r\n \"sub_label\": str(data[\"sub_label\"]),\r\n \"obj_label\": str(data[\"obj_label\"]),\r\n \"negated\": str(data.get(\"negated\", \"\")),\r\n \"masked_sentence\": str(masked_sentence),\r\n }\r\n elif self.config.name == \"google_re\":\r\n id_ = -1\r\n paths = filepath\r\n for filepath in paths:\r\n # from https://github.com/facebookresearch/LAMA/blob/master/scripts/run_experiments.py\r\n if \"place_of_birth\" in filepath:\r\n pred = {\r\n \"relation\": \"place_of_birth\",\r\n \"template\": \"[X] was born in [Y] .\",\r\n \"template_negated\": \"[X] was not born in [Y] .\",\r\n }\r\n elif \"date_of_birth\" in filepath:\r\n pred = {\r\n \"relation\": \"date_of_birth\",\r\n \"template\": \"[X] (born [Y]).\",\r\n \"template_negated\": \"[X] (not born [Y]).\",\r\n }\r\n else:\r\n pred = {\r\n \"relation\": \"place_of_death\",\r\n \"template\": \"[X] died in [Y] .\",\r\n \"template_negated\": \"[X] did not die in [Y] .\",\r\n }\r\n with open(filepath, encoding=\"utf-8\") as f:\r\n for row in f:\r\n data = json.loads(row)\r\n for masked_sentence in data[\"masked_sentences\"]:\r\n id_ += 1\r\n yield id_, {\r\n \"pred\": str(data[\"pred\"]),\r\n \"sub\": str(data[\"sub\"]),\r\n \"obj\": str(data[\"obj\"]),\r\n \"evidences\": str(data[\"evidences\"]),\r\n \"judgments\": str(data[\"judgments\"]),\r\n \"sub_w\": str(data[\"sub_w\"]),\r\n \"sub_label\": str(data[\"sub_label\"]),\r\n \"sub_aliases\": str(data[\"sub_aliases\"]),\r\n \"obj_w\": str(data[\"obj_w\"]),\r\n \"obj_label\": str(data[\"obj_label\"]),\r\n \"obj_aliases\": str(data[\"obj_aliases\"]),\r\n \"uuid\": str(data[\"uuid\"]),\r\n \"masked_sentence\": str(masked_sentence),\r\n \"template\": str(pred[\"template\"]),\r\n \"template_negated\": str(pred[\"template_negated\"]),\r\n }", "def _exogenous_input_step(\n self, current_times, current_exogenous_regressors, state):\n raise NotImplementedError(\n \"Exogenous inputs are not implemented for this example.\")", "def test_case_2(self):\n config_generator = Genesis()\n\n data = open_json_file('r1.json')\n\n task_1 = Task.new(data=data)\n 
self.assertTrue(config_generator.add_task(task_1))\n\n with self.assertRaises(TypeError):\n bad_task = object()\n self.assertFalse(config_generator.add_task(bad_task))\n\n self.assertEqual(len(config_generator.tasks), 1)", "def test_trydo_throw():\n tymist = tyming.Tymist(tock=0.125)\n assert tymist.tyme == 0.0\n states = []\n\n do = tryDo(tymth=tymist.tymen(), states=states, tock=0.25)\n assert inspect.isgenerator(do)\n result = do.send(None)\n assert result == 0\n assert states == [State(tyme=0.0, context='enter', feed='Default', count=0)]\n result = do.send(\"Hello\")\n assert result == 1\n assert states == [State(tyme=0.0, context='enter', feed='Default', count=0),\n State(tyme=0.0, context='recur', feed='Hello', count=1)]\n\n\n tymist.tick()\n result = do.send(\"Hi\")\n assert result == 2\n assert states == [State(tyme=0.0, context='enter', feed='Default', count=0),\n State(tyme=0.0, context='recur', feed='Hello', count=1),\n State(tyme=0.125, context='recur', feed='Hi', count=2)]\n tymist.tick()\n try:\n result = do.throw(ValueError, \"Bad\")\n except ValueError as ex:\n assert ex.args[0] == \"Bad\"\n assert states == [State(tyme=0.0, context='enter', feed='Default', count=0),\n State(tyme=0.0, context='recur', feed='Hello', count=1),\n State(tyme=0.125, context='recur', feed='Hi', count=2),\n State(tyme=0.25, context='abort', feed='Hi', count=3),\n State(tyme=0.25, context='exit', feed='Hi', count=4)]\n\n tymist.tick()\n try:\n result = do.send(\"what?\")\n except StopIteration as ex:\n assert ex.value == None # not clean return\n assert states == [State(tyme=0.0, context='enter', feed='Default', count=0),\n State(tyme=0.0, context='recur', feed='Hello', count=1),\n State(tyme=0.125, context='recur', feed='Hi', count=2),\n State(tyme=0.25, context='abort', feed='Hi', count=3),\n State(tyme=0.25, context='exit', feed='Hi', count=4)]", "def make_row(invalid_data=[]):\n\n ### Database Identifiers ###\n \n # phac_sample_id, valid/invalid error-grid information.\n phac_sample_id, phac_sample_id_error = lib.random_phac_id(invalid_data)\n # umbrella_bioproject_accession, valid/invalid error-grid information.\n ub_accession, ub_accession_error = lib.umbrella_bioproject_accession(invalid_data)\n # bioproject_accession, valid/invalid error-grid information.\n bp_accession, bp_accession_error = lib.random_bioproject_accession(invalid_data)\n # biosample_accession, valid/invalid error-grid information.\n bs_accession, bs_accession_error = lib.random_biosample_accession(invalid_data)\n # sra_accession, valid/invalid error-grid information.\n sra_accession, sra_accession_error = lib.random_sra_accession(invalid_data)\n # genbank_accession, valid/invalid error-grid information.\n gb_accession, gb_accession_error = lib.random_genbank_accession(invalid_data)\n # gisaid_accession, valid/invalid error-grid information.\n gisaid_accession, gisaid_accession_error = lib.random_gisaid_accession(invalid_data)\n \n ### Sample Collection and Processing ###\n \n # sample_collected_by, valid/invalid error-grid information.\n samp_col_by, samp_col_by_error = lib.random_agency(invalid_data)\n # sample_collector_contact_email, valid/invalid error-grid information.\n samp_col_email, samp_col_email_error = lib.random_email(invalid_data)\n # sample_collector_contact_address, valid/invalid error-grid information.\n samp_col_address, samp_col_address_error = lib.random_address(invalid_data)\n # sequence_submitter_contact_email, valid/invalid error-grid information.\n seq_sub_email, seq_sub_email_error = 
lib.random_email(invalid_data)\n # sequence_submitter_contact_address, valid/invalid error-grid information.\n seq_sub_address, seq_sub_address_error = lib.random_address(invalid_data)\n # sample_collection_date, valid/invalid error-grid information.\n samp_col_date, samp_col_date_error = lib.random_date(invalid_data)\n # sample_received_date, valid/invalid error-grid information.\n samp_rec_date, samp_rec_date_error = lib.random_date(invalid_data)\n # geo_loc_name_country, valid/invalid error-grid information.\n geo_loc_country, geo_loc_country_error = lib.random_country(invalid_data)\n # geo_loc_name_province_territory, valid/invalid error-grid information.\n geo_loc_prov_ter, geo_loc_prov_ter_error = lib.random_province_territory(invalid_data)\n # geo_loc_name_city, valid/invalid error-grid information.\n geo_loc_city, geo_loc_city_error = lib.random_city(invalid_data)\n # organism, valid/invalid error-grid information.\n organism, organism_error = lib.random_organism(invalid_data)\n # purpose_of_sampling, valid/invalid error-grid information.\n p_o_sampling, p_o_sampling_error = lib.random_purpose_of_sampling(invalid_data)\n # anatomical_material, valid/invalid error-grid information.\n anat_material, anat_material_error = lib.random_anatomical_material(invalid_data)\n # anatomical_part, valid/invalid error-grid information.\n anat_part, anat_part_error = lib.random_anatomical_part(invalid_data)\n # body_product, valid/invalid error-grid information.\n body_product, body_product_error = lib.random_body_product(invalid_data)\n # environmental_material, valid/invalid error-grid information.\n envi_material, envi_material_error = lib.random_environmental_material(invalid_data)\n # environmental_site, valid/invalid error-grid information.\n envi_site, envi_site_error = lib.random_environmental_site(invalid_data)\n # collection_device, valid/invalid error-grid information.\n col_device, col_device_error = lib.random_collection_device(invalid_data)\n # collection_method, valid/invalid error-grid information.\n col_method, col_method_error = lib.random_collection_method(invalid_data)\n # collection_protocol, valid/invalid error-grid information.\n col_protocol, col_protocol_error = lib.fake_protocol(invalid_data)\n # specimen_processing, valid/invalid error-grid information.\n spec_process, spec_process_error = lib.random_specimen_processing(invalid_data)\n # lab_host, valid/invalid error-grid information.\n lab_host, lab_host_error = lib.random_lab_host(invalid_data)\n # passage_number , valid/invalid error-grid information.\n passage_num, passage_num_error = lib.random_passage_number(invalid_data)\n # passage_method, valid/invalid error-grid information.\n passage_method, passage_method_error = lib.passage_method_text(invalid_data)\n # biomaterial_extracted, valid/invalid error-grid information.\n biom_extract, biom_extract_error = lib.random_biomaterial_extracted(invalid_data)\n \n ### Host Information ###\n \n # host_common_name, valid/invalid error-grid information.\n host_com_name, host_com_name_error = lib.random_host_common_name(invalid_data)\n # host_scientific_name, valid/invalid error-grid information.\n host_sci_name, host_sci_name_error = lib.random_host_scientific_name(invalid_data)\n # host_health_state, valid/invalid error-grid information.\n host_health_state, host_health_state_error = lib.random_host_health_state(invalid_data)\n # host_health_status_details, valid/invalid error-grid information.\n host_health_status, host_health_status_error = 
lib.random_host_health_status_details(invalid_data)\n # host_disease, valid/invalid error-grid information.\n host_disease, host_disease_error = lib.random_host_disease(invalid_data)\n # host_age, valid/invalid error-grid information.\n host_age, host_age_error = lib.random_host_age(invalid_data)\n # host_gender, valid/invalid error-grid information.\n host_gender, host_gender_error = lib.random_host_gender(invalid_data)\n # host_origin_geo_loc_name_country, valid/invalid error-grid information.\n host_loc_country, host_loc_country_error = lib.random_country(invalid_data)\n # host_subject_id, valid/invalid error-grid information.\n host_sub_id, host_sub_id_error = lib.random_host_subject_id(invalid_data)\n # symptom_onset_date, valid/invalid error-grid information.\n symp_onset_date, symp_onset_date_error = lib.random_date(invalid_data)\n # signs_and_symptoms, valid/invalid error-grid information.\n signs_symptoms, signs_symptoms_error = lib.random_signs_symptoms(invalid_data)\n \n ### Host Exposure Information ###\n \n # location_of_exposure_geo_loc_name_country, valid/invalid error-grid information.\n loc_exp_country, loc_exp_country_error = lib.random_country(invalid_data)\n # travel_history, valid/invalid error-grid information. \n trav_history, trav_history_error = lib.random_travel_history(invalid_data)\n # exposure_event, valid/invalid error-grid information.\n exp_event, exp_event_error = lib.random_exposure_event(invalid_data)\n \n ### Sequencing ###\n \n # minion_barcode, valid/invalid error-grid information.\n minion_barcode, minion_barcode_error = lib.random_minIon_barcode(invalid_data)\n # sequencing_instrument, valid/invalid error-grid information.\n seq_instrument, seq_instrument_error = lib.random_seq_instrument(invalid_data)\n # sequencing_protocol_name, valid/invalid error-grid information.\n seq_prot_name, seq_prot_name_error = lib.fake_protocol(invalid_data)\n # sequencing_protocol_source, valid/invalid error-grid information.\n seq_prot_source, seq_prot_source_error = lib.random_seq_protocol_source(invalid_data)\n # sequencing_kit_number, valid/invalid error-grid information.\n seq_kit_num, seq_kit_num_error = lib.random_seq_kit_num(invalid_data)\n # amplicon_pcr_primers_filename, valid/invalid error-grid information.\n amp_pcr_filename, amp_pcr_filename_error = lib.random_txt_filename(invalid_data)\n \n ### Bioinformatics and QC metrics ###\n \n # raw_sequence_data_processing, valid/invalid error-grid information.\n raw_seq_process, raw_seq_process_error = lib.random_seq_process(invalid_data)\n # sequencing_depth_average, valid/invalid error-grid information.\n seq_depth_avg, seq_depth_avg_error = lib.random_seq_depth(invalid_data)\n # assembly_method, valid/invalid error-grid information.\n assemb_method, assemb_method_error = lib.random_assembly_software(invalid_data)\n # assembly_coverage_breadth, valid/invalid error-grid information.\n assemb_cov_breadth, assemb_cov_breadth_error = lib.random_assembly_coverage_breadth(invalid_data)\n # assembly_coverage_depth, valid/invalid error-grid information.\n assemb_cov_depth, assemb_cov_depth_error = lib.random_seq_depth(invalid_data)\n # r1_fastq_filename, valid/invalid error-grid information.\n r1_filename, r1_filename_error = lib.random_fastq_filename(invalid_data)\n # r2_fastq_filename, valid/invalid error-grid information.\n r2_filename, r2_filename_error = lib.random_fastq_filename(invalid_data)\n # r1_fastq_filepath, valid/invalid error-grid information.\n r1_filepath, r1_filepath_error = 
lib.random_filepath(r1_filename, invalid_data)\n # r2_fastq_filepath, valid/invalid error-grid information.\n r2_filepath, r2_filepath_error = lib.random_filepath(r2_filename, invalid_data)\n # fast5_filename, valid/invalid error-grid information.\n fast5_filename, fast5_filename_error = lib.random_fast5_filename(invalid_data)\n # fast5_filepath, valid/invalid error-grid information.\n fast5_filepath, fast5_filepath_error = lib.random_filepath(fast5_filename, invalid_data)\n # fasta_filename, valid/invalid error-grid information.\n fasta_filename, fasta_filename_error = lib.random_fasta_filename(invalid_data)\n # fasta_filepath, valid/invalid error-grid information.\n fasta_filepath, fasta_filepath_error = lib.random_filepath(fasta_filename, invalid_data)\n # number_base_pairs, valid/invalid error-grid information.\n num_bp, num_bp_error = lib.random_bp_num(invalid_data)\n # consensus_genome_length, valid/invalid error-grid information.\n cons_genome_len, cons_genome_len_error = lib.random_genome_length(invalid_data)\n # mean_contig_length, valid/invalid error-grid information.\n mean_contig_len, mean_contig_len_error = lib.random_contig_length(invalid_data)\n # n50, valid/invalid error-grid information.\n n50, n50_error = lib.random_n50(invalid_data)\n # ns_per_100_kbp, valid/invalid error-grid information.\n ns_100kbp, ns_100kbp_error = lib.random_ns_100kbp(invalid_data)\n # reference_genome_accession, valid/invalid error-grid information.\n ref_genome_accession, ref_genome_accession_error = lib.random_ref_genome(invalid_data)\n # consensus_sequence_id, valid/invalid error-grid information.\n cons_seq_id, cons_seq_id_error = lib.random_consensus_seq_id(invalid_data)\n # consensus_sequence_method, valid/invalid error-grid information.\n cons_seq_method, cons_seq_method_error = lib.random_consensus_seq_method(invalid_data)\n # consensus_sequence_filename, valid/invalid error-grid information.\n cons_seq_filename, cons_seq_filename_error = lib.random_fasta_filename(invalid_data)\n # consensus_sequence_filepath, valid/invalid error-grid information.\n cons_seq_filepath, cons_seq_filepath_error = lib.random_filepath(cons_seq_filename, invalid_data)\n # annotation_feature_table_filename, valid/invalid error-grid information.\n annot_table_filename, annot_table_filename_error = lib.random_feature_table_filename(invalid_data)\n # bioinformatics_protocol, valid/invalid error-grid information.\n biof_protocol, biof_protocol_error = lib.bioinformatics_protocol(invalid_data)\n \n ### Pathogen Diagnostic Testing ###\n \n # gene_name_1, valid/invalid error-grid information.\n gene_1, gene_1_error = lib.random_gene(invalid_data)\n # diagnostic_pcr_protocol_1, valid/invalid error-grid information.\n pcr_protocol_1, pcr_protocol_1_error = lib.fake_protocol(invalid_data)\n # diagnostic_pcr_ct_value_1, valid/invalid error-grid information.\n pcr_ct_1, pcr_ct_1_error = lib.random_pcr_ct_val(invalid_data)\n # gene_name_2, valid/invalid error-grid information.\n gene_2, gene_2_error = lib.random_gene(invalid_data)\n # diagnostic_pcr_protocol_2, valid/invalid error-grid information.\n pcr_protocol_2, pcr_protocol_2_error = lib.fake_protocol(invalid_data)\n # diagnostic_pcr_ct_value_2, valid/invalid error-grid information.\n pcr_ct_2, pcr_ct_2_error = lib.random_pcr_ct_val(invalid_data)\n \n ### Contributor Acknowledgement ###\n \n # authors, valid/invalid error-grid information.\n authors, authors_error = lib.authors(invalid_data)\n \n ### Dependent IDs ###\n \n # specimen_collector_sample_id.\n 
spec_col_sample_id, spec_col_sample_id_error = lib.random_specimen_collector_sample_id(\n geo_loc_country, \n geo_loc_prov_ter,\n geo_loc_city,\n invalid_data)\n # irida_sample_name.\n irida_sample_id = spec_col_sample_id\n # sequence_submitted_by.\n seq_sub_by = samp_col_by\n # library_id, valid/invalid error-grid information.\n library_id, library_id_error = lib.random_library_id(spec_col_sample_id, invalid_data)\n # isolate.\n isolate = spec_col_sample_id\n # assembly_name, valid/invalid error-grid information.\n assemb_name, assemb_name_error = lib.random_assembly_name(spec_col_sample_id, invalid_data)\n\n # Row of generated data organised by column.\n cols = [spec_col_sample_id,\n phac_sample_id,\n irida_sample_id,\n ub_accession,\n bp_accession,\n bs_accession,\n sra_accession,\n gb_accession,\n gisaid_accession,\n samp_col_by,\n samp_col_email,\n samp_col_address,\n seq_sub_by,\n seq_sub_email,\n seq_sub_address,\n samp_col_date,\n samp_rec_date,\n geo_loc_country,\n geo_loc_prov_ter,\n geo_loc_city,\n organism,\n isolate,\n p_o_sampling,\n anat_material,\n anat_part,\n body_product,\n envi_material,\n envi_site,\n col_device,\n col_method,\n col_protocol,\n spec_process,\n lab_host,\n passage_num,\n passage_method,\n biom_extract,\n host_com_name,\n host_sci_name,\n host_health_state,\n host_health_status,\n host_disease,\n host_age,\n host_gender,\n host_loc_country,\n host_sub_id,\n symp_onset_date,\n signs_symptoms,\n loc_exp_country,\n trav_history,\n exp_event,\n library_id,\n minion_barcode,\n seq_instrument,\n seq_prot_name,\n seq_prot_source,\n seq_kit_num,\n amp_pcr_filename,\n raw_seq_process,\n seq_depth_avg,\n assemb_name,\n assemb_method,\n assemb_cov_breadth,\n assemb_cov_depth,\n r1_filename,\n r2_filename,\n r1_filepath,\n r2_filepath,\n fast5_filename,\n fast5_filepath,\n fasta_filename,\n fasta_filepath,\n num_bp,\n cons_genome_len,\n mean_contig_len,\n n50,\n ns_100kbp,\n ref_genome_accession,\n cons_seq_id,\n cons_seq_method,\n cons_seq_filename,\n cons_seq_filepath,\n annot_table_filename,\n biof_protocol,\n gene_1,\n pcr_protocol_1,\n pcr_ct_1,\n gene_2,\n pcr_protocol_2,\n pcr_ct_2,\n authors]\n\n # Error grid valid/invalid (error specific) information.\n grid = ['-', # spec_col_sample_id_error placeholder\n phac_sample_id_error,\n '-', # irida_sample_id_error placeholder\n ub_accession_error,\n bp_accession_error,\n bs_accession_error,\n sra_accession_error,\n gb_accession_error,\n gisaid_accession_error,\n samp_col_by_error,\n samp_col_email_error,\n samp_col_address_error,\n '-', # seq_sub_by_error placeholder\n seq_sub_email_error,\n seq_sub_address_error,\n samp_col_date_error,\n samp_rec_date_error,\n geo_loc_country_error,\n geo_loc_prov_ter_error,\n geo_loc_city_error,\n organism_error,\n '-', # isolate_error placeholder\n p_o_sampling_error,\n anat_material_error,\n anat_part_error,\n body_product_error,\n envi_material_error,\n envi_site_error,\n col_device_error,\n col_method_error,\n col_protocol_error,\n spec_process_error,\n lab_host_error,\n passage_num_error,\n passage_method_error,\n biom_extract_error,\n host_com_name_error,\n host_sci_name_error,\n host_health_state_error,\n host_health_status_error,\n host_disease_error,\n host_age_error,\n host_gender_error,\n host_loc_country_error,\n host_sub_id_error,\n symp_onset_date_error,\n signs_symptoms_error,\n loc_exp_country_error,\n trav_history_error,\n exp_event_error,\n library_id_error,\n minion_barcode_error,\n seq_instrument_error,\n seq_prot_name_error,\n seq_prot_source_error,\n 
seq_kit_num_error,\n amp_pcr_filename_error,\n raw_seq_process_error,\n seq_depth_avg_error,\n assemb_name_error,\n assemb_method_error,\n assemb_cov_breadth_error,\n assemb_cov_depth_error,\n r1_filename_error,\n r2_filename_error,\n r1_filepath_error,\n r2_filepath_error,\n fast5_filename_error,\n fast5_filepath_error,\n fasta_filename_error,\n fasta_filepath_error,\n num_bp_error,\n cons_genome_len_error,\n mean_contig_len_error,\n n50_error,\n ns_100kbp_error,\n ref_genome_accession_error,\n cons_seq_id_error,\n cons_seq_method_error,\n cons_seq_filename_error,\n cons_seq_filepath_error,\n annot_table_filename_error,\n biof_protocol_error,\n gene_1_error,\n pcr_protocol_1_error,\n pcr_ct_1_error,\n gene_2_error,\n pcr_protocol_2_error,\n pcr_ct_2_error,\n authors_error]\n\n # return row of data for data file and row of data validity for error grid.\n return cols, grid", "def test_analyze_recipe_instructions(self):\n pass", "def test_forfatal_functions(self):\n logging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\n logging.getLogger(\"batchglm\").setLevel(logging.WARNING)\n logging.getLogger(\"diffxpy\").setLevel(logging.WARNING)\n\n num_observations = 10\n num_features = 2\n\n sim = Simulator(num_observations=num_observations, num_features=num_features)\n sim.generate_sample_description(num_batches=0, num_conditions=0)\n sim.generate()\n\n random_sample_description = pd.DataFrame({\n \"pseudotime\": np.random.random(size=sim.nobs),\n \"batch\": np.random.randint(2, size=sim.nobs)\n })\n\n test = de.test.continuous_1d(\n data=sim.X,\n continuous=\"pseudotime\",\n df=3,\n formula_loc=\"~ 1 + pseudotime + batch\",\n formula_scale=\"~ 1\",\n factor_loc_totest=\"pseudotime\",\n test=\"wald\",\n sample_description=random_sample_description,\n quick_scale=True,\n batch_size=None,\n training_strategy=\"DEFAULT\",\n dtype=\"float64\"\n )\n\n summary = test.summary()\n ids = test.gene_ids\n\n # 1. Test all additional functions which depend on model computation:\n # 1.1. Only continuous model:\n temp = test.log_fold_change(genes=ids, nonnumeric=False)\n temp = test.max(genes=ids, nonnumeric=False)\n temp = test.min(genes=ids, nonnumeric=False)\n temp = test.argmax(genes=ids, nonnumeric=False)\n temp = test.argmin(genes=ids, nonnumeric=False)\n temp = test.summary(nonnumeric=False)\n # 1.2. 
Full model:\n temp = test.log_fold_change(genes=ids, nonnumeric=True)\n temp = test.max(genes=ids, nonnumeric=True)\n temp = test.min(genes=ids, nonnumeric=True)\n temp = test.argmax(genes=ids, nonnumeric=True)\n temp = test.argmin(genes=ids, nonnumeric=True)\n temp = test.summary(nonnumeric=True)\n\n return True", "def test_step_out_of_bounds_indices(self):\n _, data_directory = self._collect_episode_data(\n num_episodes=6, max_episodes_per_file=3)\n with riegeli_backend_reader.RiegeliBackendReader(\n data_directory) as data_reader:\n self.assertRaises(IndexError, operator.getitem, data_reader.steps,\n len(data_reader.steps))\n self.assertRaises(IndexError, operator.getitem, data_reader.steps,\n -len(data_reader.steps) - 1)", "def test_generate_missing_with_step_parsers(pytester):\n pytester.makefile(\n \".feature\",\n generation=textwrap.dedent(\n \"\"\"\\\n Feature: Missing code generation with step parsers\n\n Scenario: Step parsers are correctly discovered\n Given I use the string parser without parameter\n And I use parsers.parse with parameter 1\n And I use parsers.re with parameter 2\n And I use parsers.cfparse with parameter 3\n \"\"\"\n ),\n )\n\n pytester.makepyfile(\n textwrap.dedent(\n \"\"\"\\\n import functools\n\n from pytest_bdd import scenarios, given, parsers\n\n scenarios(\"generation.feature\")\n\n @given(\"I use the string parser without parameter\")\n def _():\n return None\n\n @given(parsers.parse(\"I use parsers.parse with parameter {param}\"))\n def _(param):\n return param\n\n @given(parsers.re(r\"^I use parsers.re with parameter (?P<param>.*?)$\"))\n def _(param):\n return param\n\n @given(parsers.cfparse(\"I use parsers.cfparse with parameter {param:d}\"))\n def _(param):\n return param\n \"\"\"\n )\n )\n\n result = pytester.runpytest(\"--generate-missing\", \"--feature\", \"generation.feature\")\n result.assert_outcomes(passed=0, failed=0, errors=0)\n assert not result.stderr.str()\n assert result.ret == 0\n\n output = result.stdout.str()\n\n assert \"I use the string parser\" not in output\n assert \"I use parsers.parse\" not in output\n assert \"I use parsers.re\" not in output\n assert \"I use parsers.cfparse\" not in output", "def get_exercises(self) -> Iterator[Tuple[int, List[File]]]:\n for cell in self.cells:\n exercise_id, files = self.get_exercise(cell)\n if exercise_id and files and files[0].code:\n yield (exercise_id, files)", "def test_ascii_increment(self):\r\n self.assertRaises(ValueError, convert_fastaqual, self.fasta_file_path,\r\n ascii_increment=140, output_directory=self.output_dir)", "def generate(self):\r\n raise NotImplementedError", "def bad_examples(self, input_rows):\n for row in input_rows.split(\"===\"):\n row = row.strip()\n if row == \"\" or row.startswith(\"#\"):\n continue\n\n if \"->\" in row:\n field, expected_error = row.split(\"->\")\n else:\n field = row\n expected_error = \"None\"\n\n field = field.strip()\n expected_error = expected_error.strip() + \"\\n\"\n yield field, expected_error", "def test_raises(self):\n no_replicates = 25\n try:\n replicate(experiment3, no_replicates)\n except RuntimeError as err:\n self.assertEqual(err, FAKE_ERROR)\n else:\n assert False", "def test_cases():\n CasesTestCase.generate_tests()\n yield CasesTestCase\n yield DocTestsTestCase", "def setUp(self):\n self.t = Task()\n self.t(\"add one mississippi\")\n self.t(\"add two mississippi\")", "def main():\n\n args = get_args()\n random.seed(args.seed)\n wod = []\n\n for name, low, high in read_csv(args.file):\n reps = random.randint(low, high)\n 
if args.easy:\n reps = int(reps / 2)\n wod.append((name, reps))\n\n wod = random.sample(wod, k=args.num_exercises)\n print(tabulate(wod, headers=('Exercise', 'Reps')))", "def do_checks(self):\n # ## get valid experiment variables\n all_subexperiments = [1, 2, 3]\n all_plates = list(range(1, 19))\n all_cell_ids = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']\n all_samples = list(self.experiment.design['Sample'])\n all_genes = self.experiment.subexperiments[1].plates[1].samples[all_samples[0]].genes\n all_replicates = list(range(1, 7))\n all_time = [0.5, 1.0, 2.0, 3.0, 4.0, 8.0, 12.0, 24.0, 48.0, 72.0, 96.0]\n\n if self.time is None:\n if self.treatment is 'Baseline':\n self.time = [0.0, 96.0]\n else:\n self.time = all_time\n\n if self.cell_id is None:\n self.cell_id = all_cell_ids\n\n if self.gene is None:\n self.gene = all_genes\n\n if self.replicate is None:\n self.replicate = all_replicates\n\n if self.treatment is None:\n raise ValueError('treatment cannot be None. Specify one of \"TGFb\", \"Control\", \"Baseline\"')\n\n if not isinstance(self.treatment, str):\n raise ValueError('treatment must be a string. Got \"{}\" a \"{}\"'.format(\n self.treatment, type(self.treatment)\n ))\n\n if not isinstance(self.normed, bool):\n raise ValueError('normed argument should be boolean. Got \"{}\"'.format(\n type(self.normed)\n ))\n\n if not isinstance(self.time, list):\n self.time = [self.time]\n\n for time_point in self.time:\n if time_point not in sorted(list(set(self.data.columns.get_level_values(1)))):\n raise ValueError('\"{}\" is invalid time point. Valid time '\n 'points are: {}'.format(\n time_point, list(self.data.columns))\n )", "def test_invalid_flag_record(self):\n log.info(\"START INTEG TEST INVALID\")\n\n self.clear_sample_data()\n self.clear_async_data()\n\n # Made-up data with all flags except the first set to True.\n # First flag is not a zero or one.\n filename = \"A1000003.DEC\"\n self.create_sample_data('invalid_A0000003.DEC', filename)\n\n # Start sampling.\n self.driver.start_sampling()\n\n # an event catches the sample exception\n self.assert_event('ResourceAgentErrorEvent')\n\n # Verify that the entire file has been read.\n self.assert_file_ingested(filename)\n log.info(\"END INTEG TEST INVALID\")", "def test_teacher_check_homework_raises_homework_repeat_error_if_same_solution_was_already_submitted():\n with pytest.raises(HomeworkRepeatError):\n opp_teacher.check_homework(result_1)\n advanced_python_teacher.check_homework(result_1)\n Teacher.reset_results(oop_hw)", "def generate(cls):\n raise NotImplementedError()", "def test_noregen(self, tmpdir, treantclass):\n with tmpdir.as_cwd():\n # 1\n t1 = treantclass('newone')\n t2 = treantclass('newone', new=True)\n assert t1.uuid != t2.uuid\n\n with pytest.raises(dtr.treants.MultipleTreantsError):\n t3 = treantclass('newone')", "def test_bad_input_data(tool):\n\n for cmd in (\"filter\", \"report\", \"start\", \"stats\"):\n for args in tool.bad_paths:\n if cmd == \"filter\":\n args = f\"--rfilt 'index!=0' {args}\"\n with pytest.raises(Exceptions.Error):\n tool.command(cmd, args)", "def _generate(self, **kwargs):\n super()._generate(**kwargs)", "def _generate(self, **kwargs):\n super()._generate(**kwargs)", "def _generate(self, **kwargs):\n super()._generate(**kwargs)", "def _exogenous_input_step(\r\n self, current_times, current_exogenous_regressors, state):\r\n raise NotImplementedError(\r\n \"Exogenous inputs are not implemented for this example.\")" ]
[ "0.5758369", "0.56753606", "0.5549753", "0.53802556", "0.53689146", "0.53568685", "0.5292572", "0.528666", "0.5275125", "0.5251637", "0.5170305", "0.51676023", "0.51614165", "0.51531315", "0.5127131", "0.5115587", "0.51139444", "0.5084856", "0.50776774", "0.50622576", "0.5061116", "0.50604576", "0.5056269", "0.50464183", "0.50455076", "0.5027918", "0.5026984", "0.5012438", "0.5011268", "0.5010567", "0.50068825", "0.4984202", "0.49720994", "0.49708736", "0.49699226", "0.4968732", "0.4963637", "0.49557304", "0.49499848", "0.49319428", "0.49282315", "0.4926757", "0.4919709", "0.4911246", "0.491052", "0.49088734", "0.49087796", "0.4907601", "0.49010172", "0.48975754", "0.48952937", "0.4884655", "0.48821163", "0.48655906", "0.48578584", "0.48449463", "0.48429522", "0.48405087", "0.48404485", "0.48289967", "0.48250985", "0.48142722", "0.48070654", "0.48024192", "0.48023096", "0.47993007", "0.4786551", "0.4785649", "0.47848982", "0.47814837", "0.4781178", "0.4771078", "0.476438", "0.47616172", "0.47549367", "0.4753912", "0.4752425", "0.47438845", "0.4742989", "0.4742515", "0.47367445", "0.4727853", "0.47256687", "0.47220895", "0.4721348", "0.4719712", "0.47185138", "0.47065252", "0.4705373", "0.4700886", "0.47001746", "0.46937302", "0.46908563", "0.46907854", "0.46883154", "0.46869", "0.4686816", "0.4686816", "0.4686816", "0.4684215" ]
0.6592326
0
Token serializer encodes a JWT correctly.
def test_encode_token(token): assert token.count('.') == 2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode(self, payload):\n jwt_payload = payload.copy()\n if self.audience is not None:\n jwt_payload['aud'] = self.audience\n if self.issuer is not None:\n jwt_payload['iss'] = self.issuer\n\n token = jwt.encode(jwt_payload, self.signing_key, algorithm=self.algorithm)\n if isinstance(token, bytes):\n # For PyJWT <= 1.7.1\n return token.decode('utf-8')\n # For PyJWT >= 2.0.0a1\n return token", "def token(self):\n \n payload = {\n 'sub_type': self.sub_type,\n '_hash' : self._hash,\n 'jti' : str(uuid.uuid4())\n }\n return jwt.encode(payload, self.secret, self.algo).decode('utf-8')", "def token(self):\n payload = {\n 'id': str(self.id),\n 'username': self.username,\n \"exp\": datetime.now() + timedelta(days=2)\n }\n return jwt.encode(payload, SECRET_KEY).decode('utf-8')", "def serializeToken(token):\n struct = { 'refreshToken': token.refreshToken, 'token_type': token.tokenType }\n return json.dumps(struct)", "async def encode(self, payload: dict) -> str:\n delta_seconds = self.duration\n jwt_data = {\n **payload,\n \"exp\": datetime.utcnow() + timedelta(seconds=delta_seconds),\n }\n\n jwt_token = jwt.encode(jwt_data, self.jwt_secret, self.jwt_algorithm)\n token = jwt_token.decode(\"utf-8\")\n\n return token", "def jwt_encode_handler(payload):\n\n return jwt.encode(\n payload,\n api_settings.JWT_SECRET_KEY,\n api_settings.JWT_ALGORITHM\n ).decode('utf-8')", "def encode_payload(payload):\n jwt_secret = app.config['SECRET_KEY']\n # expiry = 60 * 60 * 24 * 100 # 100 days\n # payload['exp'] = datetime.datetime.utcnow() + datetime.timedelta(seconds=expiry)\n encoded_jwt = jwt.encode(payload, jwt_secret, algorithm='HS256')\n\n return (encoded_jwt).decode()", "def _generate_jwt_token(self):\n payload = jwt_payload_handler(self)\n token = jwt_encode_handler(payload)\n return token", "def encode_auth_token(self, id):\n payload = {\n \"exp\": datetime.utcnow()\n + timedelta(\n days=current_app.config.get(\"TOKEN_EXPIRATION_DAYS\"),\n seconds=current_app.config.get(\"TOKEN_EXPIRATION_SECONDS\"),\n ),\n \"iat\": datetime.utcnow(),\n \"sub\": id,\n }\n return jwt.encode(\n payload, current_app.config.get(\"SECRET_KEY\"), algorithm=\"HS256\"\n )", "def encode(data):\n return jwt.encode(data, app.config[\"JWT_SECRET\"], algorithm=\"HS256\")", "def encode_auth_token(userdata):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=10),\n 'iat': datetime.datetime.utcnow(),\n 'username': userdata['username'],\n 'password':userdata['password']\n }\n return jwt.encode(\n payload,\n Config.SECRET_KEY,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def encode_token(userId):\n token = jwt.encode({'userId': userId, 'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=20)},\n secret_key).decode('utf-8')\n return token", "def token(self):\n token = jwt.encode(\n {\n \"id\": self.pk,\n \"username\": self.get_full_name,\n \"email\": self.email,\n \"iat\": datetime.utcnow(),\n \"exp\": datetime.utcnow() + timedelta(minutes=int(os.getenv('TIME_DELTA')))\n },\n settings.SECRET_KEY, algorithm='HS256').decode()\n return token", "def encode_auth_token(userdata):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=10),\n 'iat': datetime.datetime.utcnow(),\n 'uid': userdata['uid'],\n 'pwd':userdata['pwd'],\n 'role': userdata['role']\n }\n return jwt.encode(\n payload,\n Config.SECRET_KEY,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def _create_token(self, payload, key):\n return jwt.encode(payload, key, algorithm='RS256')", 
"def make_token(self, data: object) -> str:\n return self.serializer.dumps(data)", "def serialize(self, expires=None):\n if self.secret_key is None:\n raise RuntimeError('no secret key defined')\n if expires:\n self['exp'] = expires\n return jwt.encode(self, self.secret_key, self.algorithm)", "def encode_auth_token(self, user_id):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=0),\n 'iat': datetime.datetime.utcnow(),\n 'sub': user_id\n }\n return jwt.encode(\n payload,\n app.config.get('SECRET_KEY'),\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def encode_auth_token(user_data, config):\n ttl_days = config.get('JWT_TTL_DAYS', 0)\n ttl_seconds = config.get('JWT_TTL_SECONDS', 0)\n secret_key = config['JWT_SECRET_KEY']\n\n now = dt.datetime.utcnow()\n try:\n payload = {\n 'exp': now + dt.timedelta(days=ttl_days, seconds=ttl_seconds),\n 'iat': now,\n 'sub': user_data\n }\n return jwt.encode(\n payload,\n secret_key,\n algorithm='HS256'\n )\n except Exception:\n raise", "def _generate_jwt_token(self):\n dt = datetime.now() + timedelta(days=60)\n\n token = jwt.encode({\n 'id': self.pk,\n 'exp': int(dt.strftime('%s'))\n }, settings.SECRET_KEY, algorithm='HS256')\n\n return token.decode('utf-8')", "def _generate_jwt_token(self):\n import jwt\n from datetime import datetime, timedelta\n from django.conf import settings\n\n dt = datetime.now() + timedelta(days=60)\n\n token = jwt.encode({\n 'id': self.pk,\n 'username': self.username,\n 'exp': int(dt.strftime('%s')),\n }, settings.SECRET_KEY, algorithm='HS256')\n # print(token)\n return token", "def encode_auth_token(user_id: int, user_name:str, user_login:str, perfil_nome:str) -> bytes:\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=5),\n 'iat': datetime.datetime.utcnow(),\n 'uid': user_id,\n 'name': user_name,\n 'login': user_login,\n 'perfil': perfil_nome,\n }\n return jwt.encode(\n payload,\n key,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def get_encoded_token(expiration_from_now_s=600):\n token_payload = {'exp': int(time.time() + expiration_from_now_s)}\n token_payload.update(TOKEN_PAYLOAD_TEMPLATE)\n\n token = jwt.encode(token_payload, 'secret', algorithm='HS256')\n\n return token, token_payload", "def encode_jwt(self, claims_set):\n key = self.master_secret\n private_key = self.private_key\n if self.private_key_file is not None:\n with open(self.private_key_file, 'r') as rsa_priv_file:\n private_key = rsa_priv_file.read()\n if private_key is not None:\n key = private_key\n algorithm = self.algorithm\n token = jwt.encode(claims_set, key, algorithm)\n\n if PY3:\n token = token.decode(encoding='UTF-8')\n return token", "def encode_auth_token(user_id):\n rfexp = datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=5)\n exp = int(time.time()+600)\n try:\n payload = {\n 'exp': exp,\n 'iat': datetime.datetime.utcnow(),\n 'sub': user_id\n }\n RFpayload = {\n 'exp': rfexp,\n 'iat': datetime.datetime.utcnow(),\n 'sub': user_id\n }\n return jwt.encode(\n payload,\n key,\n algorithm='HS256'\n ), jwt.encode(\n RFpayload,\n key,\n algorithm='HS512'\n )\n except Exception as e:\n return e", "def encode_auth_token(user_id, email):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=100, seconds=5),\n 'iat': datetime.datetime.utcnow(),\n 'sub': email + ' ' + str(user_id)\n }\n return jwt.encode(\n payload,\n key,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def 
encode_auth_token(self,user_id): \n try: \n exp = datetime.utcnow() + timedelta(days=1)\n \n payload = {\n 'exp': exp, \n 'iat': datetime.utcnow(), \n 'sub': user_id\n }\n \n encoded_auth_token = jwt.encode(\n payload, \n getattr(settings, \"SECRET_KEY\",\"\"),\n algorithm='HS256'\n )\n return encoded_auth_token\n except Exception as e: \n print_exception(e)\n return e", "def sign_id_token(payload):\n signing_key = jwt.algorithms.RSAAlgorithm.from_jwk(json.dumps(TESTING_JWT_KEYSET))\n return jwt_encode(\n payload,\n signing_key,\n algorithm=\"RS256\",\n headers={\"kid\": TESTING_JWT_KEYSET[\"kid\"]},\n )", "def to_jwt(self, key=None, algorithm=\"\", lev=0):\n _jws = JWS(self.to_json(lev), alg=algorithm, typ='JWT')\n return _jws.sign_compact(key)", "def encode_auth_token(secret_key, user_id):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),\n 'iat': datetime.datetime.utcnow(),\n 'sub': user_id\n }\n return jwt.encode(\n payload,\n secret_key,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def generate_signed_token(self, extra_payload: dict[str, Any]) -> str:\n jwt_dict = {\n \"aud\": self._audience,\n \"iat\": datetime.utcnow(),\n \"nbf\": datetime.utcnow(),\n \"exp\": datetime.utcnow() + timedelta(seconds=self._expiration_time_in_seconds),\n }\n jwt_dict.update(extra_payload)\n token = jwt.encode(\n jwt_dict,\n self._secret_key,\n algorithm=self._algorithm,\n )\n return token", "def get_token(self, obj):\n jwt_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER\n jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n\n payload = jwt_payload_handler(obj)\n token = jwt_encode_handler(payload)\n\n return token", "def generate_auth_token(self):\n token = Serializer(\n app.config['API_SECRET_KEY'],\n expires_in=app.config['JWT_TOKEN_EXPIRATION']\n )\n return token.dumps({'id': self.id})", "def token(self):\n return self._generate_jwt_token()", "def token(self):\n return self._generate_jwt_token()", "def token(self):\n return self._generate_jwt_token()", "def process_jwt_token(response):\n if response.status_code == HTTPStatus.OK and current_identity:\n response.headers['new_jwt'] = '{0}'.format(\n str(__encode_token().decode('utf-8'))\n )\n\n return response", "def create_jwt(user_obj):\n return jwt.encode(\n user_serializer.GetUserInfoSerializer(user_obj).data,\n settings.SECRET_KEY, algorithm='HS256').decode('utf-8')", "def get_token(self, obj):\n\n user = User.objects.get(email=obj.email)\n\n payload = jwt_payload_handler(user)\n\n if api_settings.JWT_ALLOW_REFRESH:\n payload['orig_iat'] = timegm(\n datetime.utcnow().utctimetuple()\n )\n\n token = jwt_encode_handler(payload)\n\n return token", "def get_token(cls, username):\n try:\n payload = {\n # 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=5),\n # 'iat': datetime.datetime.utcnow(),\n 'username': username\n }\n return jwt.encode(\n payload,\n 'meow',\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def format_jwt(user, active_customer, renewable):\n now = int(time.time())\n\n claims = {\n # reserved: https://tools.ietf.org/html/rfc7519#section-4.1\n 'exp': now + app.config['AUTH_TOKEN_LIFETIME'],\n 'nbf': now, # not before\n 'iss': app.config['AUTH_TOKEN_ISSUER'],\n 'iat': now, # issue date\n # private: https://tools.ietf.org/html/rfc7519#section-4.3\n 'user': user,\n 'active_customer': active_customer,\n 'renewable': renewable,\n }\n\n return jwt.encode(\n claims,\n key=app.config['AUTH_JWT_SECRET'],\n 
algorithm=app.config['AUTH_JWT_ALGORITHM'],\n )", "def generate_token(payload: Any, secret: str | List[str]) -> str:\n return url_encode_full_stops(URLSafeTimedSerializer(secret).dumps(payload, \"token\"))", "def build_jwt(payload: dict) -> str:\n if 'sub' not in payload.keys():\n raise ValueError('sub not in payload keys')\n jwt_fields = {\n 'iss': JWT_DOMAIN,\n 'sub': None,\n 'iat': datetime.datetime.utcnow(),\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=JWT_EXPIRATION_MINUTES),\n **payload\n }\n return jwt.encode(jwt_fields, key=SECRET_KEY, json_encoder=JSONDataEncoder).decode(encoding='UTF-8')", "def create_token(user):\n payload = {\n 'sub': user.id,\n 'iat': datetime.utcnow(),\n 'exp': datetime.utcnow() + timedelta(days=1)\n }\n token = jwt.encode(payload, config.SECRET_KEY, algorithm='HS256')\n return token.decode('unicode_escape')", "def generate_token(self):\n\n try:\n # set up a payload with an expiration time\n payload = {\n 'exp': datetime.utcnow() + timedelta(minutes=45),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n # create the byte string token using the payload and the SECRET key\n jwt_string = jwt.encode(\n payload,\n app.config.get('SECRET_KEY'),\n algorithm='HS256'\n )\n return jwt_string\n\n except Exception as exception:\n # return an error in string format if an exception occurs\n return str(exception)", "def get_auth_token(self):\n data = [str(self.id), self.password]\n return login_serializer.dumps(data)", "def get_token(public_key,delta):\n return jwt.encode(\n {\n 'public_key':public_key,\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=delta)\n },\n current_app.config['SECRET_KEY'],\n algorithm=\"HS256\"\n )", "def make_id_jwt(sub, tenant=None):\n payload = {\"sub\": sub}\n if tenant is not None:\n payload[\"mender.tenant\"] = tenant\n payload = json.dumps(payload)\n payloadb64 = b64encode(payload.encode(\"utf-8\"))\n return \"bogus_header.\" + payloadb64.decode() + \".bogus_sign\"", "def update_token(token):\n try:\n payload = jwt.decode(token, os.environ.get('SECRET', 'test'))\n payload['exp'] = datetime.utcnow() + timedelta(days=100)\n jwt_bytes = jwt.encode(\n payload,\n os.environ.get('SECRET', 'test'),\n algorithm='HS256'\n )\n return jwt_bytes.decode('utf-8')\n except Exception as e:\n raise Exception(str(e))", "def generate_auth_token(self, expires_in=600):\n return jwt.encode(\n {'STULOGINID': self.STULOGINID, 'exp': time.time() + expires_in},\n app.config['SECRET_KEY'], algorithm='HS256')", "def encoded_jwt(private_key, user):\n kid = JWT_KEYPAIR_FILES.keys()[0]\n scopes = ['openid']\n return generate_signed_access_token(\n kid, private_key, user, 3600, scopes, forced_exp_time=None)", "def decode(encoded_token):\n return jwt.decode(encoded_token, key=settings.JWT_AUTH['JWT_SECRET_KEY'])", "def generate_token_string(token):\n if JWT_AUTH:\n return 'JWT {}'.format(token)\n else:\n return 'Token {}'.format(token)", "def get_jwt(self):\n if self.token is None or self.is_expired(self.token):\n self.token = self.create_jwt(self.audience, self.additional_claims)\n return self.token", "def token_generate(self, user_id):\n try:\n payload = {\n 'exp': datetime.utcnow() + timedelta(minutes=200),\n 'iat': datetime.utcnow(),\n 'sub': user_id\n }\n encoded_token = jwt.encode(\n payload, current_app.config['SECRET_KEY'], algorithm='HS256'\n )\n return encoded_token\n\n except Exception:\n return str(Exception)", "def serialize_tokens(json_obj):\n\t# load into memory\n\tres = json.dumps(json_obj)\n\twith open(config.TOKENPATH, 
\"w+\") as f:\n\t\tf.write(res)\n\treturn json_obj[\"access_token\"], json_obj[\"refresh_token\"]", "def serialize_and_sign_payload(payload):\n secret = workspace_config.secret\n serializer = URLSafeTimedSerializer(secret)\n return serializer.dumps(payload)", "def test_encode_decode_token(create_user):\n user = create_user\n user_data = {\n \"email\": user.email,\n \"username\": user.username\n }\n jwt = JWTAuthentication()\n # encode token\n encoded_token = jwt.generate_token(user_data)\n assert type(encoded_token) is str # test encoding is 'utf-8'\n # decode token\n user_details = jwt.decode_token(encoded_token)\n assert user_details['userdata'] == user_data # test token details", "def generateAuthToken(self):\n try:\n payload = {\n 'exp': datetime.utcnow() + timedelta(days=0, minutes=30),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n return jwt.encode(payload, current_app.config['SECRET_KEY'], algorithm='HS256').decode()\n except Exception as error:\n print(error)\n return error", "def validate_jwt_svid(self, token: str, audience: str) -> JwtSvid:", "def encode_u_id(u_id):\n return jwt.encode({\n \"u_id\": u_id,\n \"datetime\": json_time_translator.datetime_to_json(datetime.utcnow())\n }, '1$Arh\"1bWa/7+OS', algorithm='HS256').decode('utf-8')", "def generate_token(self):\n try:\n # set up a payload with an expiration time\n payload = {\n 'exp': datetime.utcnow() + timedelta(days=100),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n # create the byte string token using the payload and the SECRET key\n jwt_bytes = jwt.encode(\n payload,\n os.environ.get('SECRET', 'test'),\n algorithm='HS256'\n )\n return jwt_bytes.decode('utf-8')\n except Exception as e:\n # return an error in string format if an exception occurs\n raise Exception(str(e))", "def verify_jwt(token):\n return jwt.decode(token.encode(), SECRET_KEY)", "def generate_token(user):\n if JWT_AUTH:\n payload = JWT_PAYLOAD_HANDLER(user)\n return JWT_ENCODE_HANDLER(payload)\n else:\n token = Token.objects.create(user=user)\n token.save()\n return token", "def decodeJWT(self, token):\n try:\n return jwt.decode(token, self.secret, algorithms=[self.algorithm])\n except jwt.exceptions.InvalidSignatureError:\n raise ValueError(f'The following JWT is invalid: {token}')", "def get_jwt_for_user(user: User):\n dt = datetime.datetime.now() + datetime.timedelta(hours=10)\n return jwt.encode({'username': user.username, 'exp': dt}, jwt_key_env).decode('UTF-8')", "def get_auth_token(cls):\n return jsonify({\n 'user': current_user.serialize(),\n 'token': current_user.get_auth_token(),\n })", "def sign(self, auth_data: AuthData) -> str:\n self.claims = auth_data.extend_claims(self.token_type, self.claims)\n if self.token_type == TokenType.REFRESH and \"scope\" in self.claims:\n self.claims.pop(\"scope\")\n elif self.token_type == TokenType.AUTH and \"rid\" in self.claims:\n self.claims.pop(\"rid\")\n self.signed = pyjwt_encode(\n self.claims, auth_data.secret, auth_data.algorithm() # type: ignore\n )\n self._alg = auth_data.algorithm()\n return self.signed", "def encrypt(self, data, expires_in_s=None):\n assert isinstance(data, dict)\n\n now = time()\n dataserial = dumps(data)\n\n if expires_in_s is None:\n expires_in_s = self.expiration_s\n\n # Compress token\n cps = False\n if self.compress:\n uncompressed = dataserial.encode(encoding='utf-8')\n compressed = compress(uncompressed, Z_BEST_COMPRESSION)\n\n compresseddataserial = \\\n b64encode(compressed).decode(encoding='ascii')\n\n cprratio = ratio(dataserial, compresseddataserial)\n # 
Uncomment for debug. Do not leave commented on production as this\n # may leak user information in logs.\n #\n # print(\n # 'Compression ratio of {:.2f}\\n{}\\n{}\\n{}'.format(\n # cprratio, dataserial, '-' * 80, compresseddataserial,\n # )\n # )\n if cprratio > 0.0:\n dataserial = compresseddataserial\n cps = True\n\n # Build signed token\n signed = JWT(\n header={'alg': self.sign_alg},\n claims={\n # Custom claims\n 'cps': cps,\n 'dta': dataserial,\n # Standard claims\n # https://tools.ietf.org/html/rfc7519#section-4.1\n 'iss': self.issuer, # issuer name\n 'iat': now, # issued at\n 'nbf': now, # not before\n 'exp': now + expires_in_s, # expires at\n },\n )\n signed.make_signed_token(self.signkey)\n signedserial = signed.serialize()\n\n # Build encrypted token\n encrypted = JWT(\n header={'alg': self.enc_alg, 'enc': self.enc_enc},\n claims={\n # Custom claims\n 'dta': signedserial,\n # Standard claims\n # https://tools.ietf.org/html/rfc7519#section-4.1\n 'iss': self.issuer, # issuer name\n 'iat': now, # issued at\n 'nbf': now, # not before\n 'exp': now + expires_in_s, # expires at\n },\n )\n encrypted.make_encrypted_token(self.encryptkey)\n encryptedserial = encrypted.serialize()\n\n return encryptedserial", "def get_payload(cls, token):\n \n secret = cls.secret\n algo = cls.algo\n decoded = jwt.decode(token, secret, algo)\n return decoded", "def generate_token(self, user_id):\n try:\n # set up a payload with an expiration time\n payload = {\n 'exp': datetime.utcnow() + timedelta(minutes=10),\n 'iat': datetime.utcnow(),\n 'sub': user_id\n }\n # create the byte string encoded token using payload and SECRET key\n jwt_string = jwt.encode(\n payload,\n SECRET_KEY,\n algorithm='HS256'\n )\n return jwt_string\n except Exception as e:\n # return an error in string format if an exception occurs\n return str(e)", "def decode_token(token):\n\n return jwt.decode(\n token, settings.JWT_SECRET, algorithms=[settings.JWT_ALGO])", "def generate_auth_token(self, expires_in=600):\n return jwt.encode(\n {'loginid': self.loginid, 'exp': time.time() + expires_in},\n app.config['SECRET_KEY'], algorithm='HS256')", "def jwt_response_payload_handler(token, user=None, request=None):\n return {\n 'token': token\n }", "def create_jwt(self, audience: List[str], additional_claims=None) -> str:\n iat = time.time()\n exp = iat + self.lifetime\n payload = additional_claims or {}\n payload.update({'iss': self.credentials[\"client_email\"],\n 'sub': self.credentials[\"client_email\"],\n 'aud': audience,\n 'iat': iat,\n 'exp': exp,\n 'scope': ['email', 'openid', 'offline_access'],\n 'email': self.credentials[\"client_email\"]\n })\n additional_headers = {'kid': self.credentials[\"private_key_id\"]}\n token = jwt.encode(\n payload,\n self.credentials[\"private_key\"],\n headers=additional_headers,\n algorithm='RS256').decode()\n return token", "def get_token(self, user):\n\n jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\n jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n payload = jwt_payload_handler(user)\n token = jwt_encode_handler(payload)\n return token", "def generate_auth_token(self):\n s = Serializer(app.config['SECRET_KEY'])\n return s.dumps({'email': self.email})", "def test_jwt_example(self):\n data = r'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c'\n expected = json.loads(r'''{\"header\":{\"alg\":\"HS256\",\"typ\":\"JWT\"},\"payload\":{\"sub\":\"1234567890\",\"name\":\"John 
Doe\",\"iat\":1516239022},\"signature\":\"49:f9:4a:c7:04:49:48:c7:8a:28:5d:90:4f:87:f0:a4:c7:89:7f:7e:8f:3a:4e:b2:25:5f:da:75:0b:2c:c3:97\"}''')\n self.assertEqual(jc.parsers.jwt.parse(data, quiet=True), expected)", "def parse_token(token):\n return jwt.decode(token, app.config['JWT_SECRET'])", "def tokens_json(self):\n token_id, secret = self.decoded_token\n token_row = self.unauthenticated_token_row\n tokens_encoded = Fernet(secret).decrypt(\n token_row.tokens_fernet.encode('ascii'))\n return json.loads(tokens_encoded.decode('ascii'))", "def decode(token):\n return jwt.decode(token, app.config[\"JWT_SECRET\"], algorithms=[\"HS256\"])", "def generate_jwt(self):\n\n # Generate a random token\n random_token = secrets.token_hex(12)\n\n # Update database\n self.user_in_db.update({'token': random_token})\n User.users_db.put(self.user_in_db)\n\n # Create timestamps for the token\n generated = time.time()\n expires = generated + TWO_WEEKS\n\n # Return the generated jwt\n return manage_tokens.encode({\n 'email': self.email,\n 'token': random_token,\n 'generated': generated,\n 'expires': expires,\n })", "def encode_response(self, request, response, audience=None):\n jwt_payload = {\n \"iss\": self.node.id, \n \"aud\": request[\"iss\"] if audience is None else audience, \n \"iat\": datetime.utcnow(), \n \"exp\": datetime.utcnow() + timedelta(seconds=60),\n \"response\": response\n }\n if \"sub\" in request:\n jwt_payload[\"sub\"] = request[\"sub\"]\n # Create a JSON Web Token signed using the authorization server's private key.\n return encode_jwt(jwt_payload, self.node.node_name)", "def save_token(mail, jwt_token):\n # it saves jwt encoded string onto database\n Users.objects.filter(mail=mail).update(token=jwt_token)", "def create_jwt(key, cert, systeminfo, metadata, requestdata):\n\n claims = {}\n claims[\"iat\"] = int(time.time())\n claims[\"systeminfo\"] = systeminfo\n claims[\"metadata\"] = metadata\n claims[\"requestdata\"] = requestdata\n\n logging.debug(\"Claims:{}\".format(json_encode(claims)))\n\n token = jwt.JWT(header=json_encode(jwt_header([cert])),\n claims=json_encode(claims))\n\n token.make_signed_token(key)\n\n return token.serialize()", "def serialize(cls, obj):\n return json.dumps(obj, cls=CustomTypeEncoder)", "def encode_response(self, request, response, audience=None):\n jwt_payload = {\n \"iss\": self.node.id, \n \"aud\": request[\"iss\"] if audience is None else audience, \n \"iat\": datetime.utcnow(), \n \"exp\": datetime.utcnow() + timedelta(seconds=60),\n \"response\": response\n }\n if \"sub\" in request:\n jwt_payload[\"sub\"] = request[\"sub\"]\n # Create a JSON Web Token signed using the authorization server's private key.\n return encode_jwt(jwt_payload, self.node)", "def validate(cls, token):\n if not cls.JWT_REGEX.match(token):\n raise ValueError('Invalid JWT token')\n\n return token", "def encode_token(http_method, url, clientKey, sharedSecret, timeout_secs=60*60,\n **ignored):\n now = int(time())\n return jwt.encode(key=sharedSecret, algorithm='HS256', payload={\n 'aud': clientKey,\n 'exp': now + timeout_secs,\n 'iat': now,\n 'iss': clientKey,\n 'qsh': hash_url(http_method, url),\n }).decode('utf8')", "def create_token(identity: int, type_token: str, exp_time: timedelta, fresh: Optional[bool] = False) -> bytes:\n if type_token not in ['access','refresh']:\n raise ValueError(\"Type token must be between access or refresh\")\n\n payload = {\n \"iat\": AuthJWT.get_int_from_datetime(datetime.now(timezone.utc)),\n \"nbf\": 
AuthJWT.get_int_from_datetime(datetime.now(timezone.utc)),\n \"jti\": AuthJWT.get_jwt_id(),\n \"exp\": AuthJWT.get_int_from_datetime(datetime.now(timezone.utc) + exp_time),\n \"identity\": identity,\n \"type\": type_token\n }\n\n # for access_token only fresh needed\n if type_token == 'access':\n payload['fresh'] = fresh\n\n return jwt.encode(payload,AuthJWT._SECRET_KEY,algorithm=AuthJWT._ALGORITHM)", "def create_fake_JWT_token(userEmail):\n pass", "def jwt_response_payload_handler(token, user=None, request=None):\n return {\n\n \"token\": JWT_AUTH.get('JWT_AUTH_HEADER_PREFIX') + ' ' + token,\n \"expires_in\": datetime.utcnow() + api_settings.JWT_EXPIRATION_DELTA,\n \"user_info\": UsersLoginSerializer(user, context={'request': request}).data\n\n }", "def generate_token(dictionary: dict, expiration: datetime.timedelta):\n\n dictionary['expiration'] = (datetime.datetime.utcnow() + expiration).timestamp()\n\n return jwt.encode(dictionary, current_app.config['TOKEN_SECRET_KEY'], algorithm='HS256')", "def UserToken(self) -> object:", "def validate(self, data):\n try:\n payload = jwt.decode(data['token'], settings.SECRET_KEY, algorithms=['HS256'])\n except ExpiredSignatureError:\n raise serializers.ValidationError(\"The token has expired.\")\n except JWTError:\n raise serializers.ValidationError(\"Error validating token. Ensure is the right token.\")\n\n self.context['payload'] = payload\n return data", "def _get_jwe_token(self, request_id_obj):\n # Inspired by https://jwcrypto.readthedocs.io/en/latest/jwe.html#asymmetric-keys\n payload = json.dumps(request_id_obj)\n\n public_key = jwk.JWK.from_pem(self.cleopatra_pub)\n protected_header = {\n \"alg\": \"RSA-OAEP-256\",\n \"enc\": \"A256CBC-HS512\",\n \"typ\": \"JWE\",\n \"kid\": public_key.thumbprint(),\n }\n jwetoken = jwe.JWE(payload.encode('utf-8'),\n recipient=public_key,\n protected=protected_header)\n\n enc = jwetoken.serialize()\n return enc", "def _verified_token(self,encoded_token: bytes) -> Dict[str,Union[str,int,bool]]:\n try:\n return jwt.decode(encoded_token,self._SECRET_KEY,algorithms=self._ALGORITHM)\n except jwt.ExpiredSignatureError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.DecodeError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidAlgorithmError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidKeyError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidTokenError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidIssuerError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidAudienceError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidIssuedAtError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidSignatureError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.ImmatureSignatureError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.MissingRequiredClaimError as err:\n raise HTTPException(status_code=422,detail=str(err))", "def validate_token(self, data):\n try:\n payload = jwt.decode(data, settings.SECRET_KEY, algorithms=['HS256'])\n except jwt.ExpiredSignatureError:\n raise serializers.ValidationError('Verification link has expired')\n except jwt.PyJWTError:\n raise serializers.ValidationError('Invalid token.')\n\n if payload['type'] != 'email_confirmation':\n raise serializers.ValidationError('Invalid 
token.')\n\n self.context['payload'] = payload\n return data", "def get_custom_jwt(user, device):\n jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n payload = jwt_otp_payload(user, device)\n return jwt_encode_handler(payload)", "def decode_token(token):\n decoded_token = jwt.decode(token, secret_key, algorithms=['HS256'])\n return decoded_token", "def get_auth_token_student():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})" ]
[ "0.75908905", "0.7349595", "0.7172348", "0.713091", "0.7116758", "0.7112599", "0.70149577", "0.6970601", "0.69599897", "0.6854216", "0.67782104", "0.6765446", "0.6761364", "0.6754647", "0.6741674", "0.6682431", "0.6675914", "0.6661425", "0.6659169", "0.66204983", "0.6594343", "0.6558486", "0.6484914", "0.64752895", "0.6465131", "0.64648163", "0.64446986", "0.6423165", "0.64121926", "0.64030427", "0.6367331", "0.6342849", "0.6307648", "0.6305936", "0.6305936", "0.6305936", "0.629262", "0.6242176", "0.62326497", "0.6214151", "0.621046", "0.61473435", "0.6124115", "0.60978484", "0.6089753", "0.6034669", "0.60239244", "0.60090435", "0.60036767", "0.5982653", "0.5976634", "0.596688", "0.59595543", "0.5952875", "0.59333044", "0.59045655", "0.588846", "0.5872576", "0.5867385", "0.5866559", "0.5863711", "0.5856655", "0.5856611", "0.5822481", "0.58060646", "0.5799394", "0.57927877", "0.5788045", "0.57855046", "0.57737935", "0.57613117", "0.5757347", "0.57272804", "0.5721713", "0.5709063", "0.57072175", "0.570508", "0.5699525", "0.56930953", "0.5680157", "0.5676486", "0.567621", "0.5675469", "0.5673869", "0.5673435", "0.56713927", "0.5669444", "0.56633556", "0.5654005", "0.56534106", "0.56242317", "0.56210387", "0.5615869", "0.5612543", "0.56103826", "0.56078124", "0.56077385", "0.5599974", "0.5588345", "0.5583027", "0.55826783" ]
0.0
-1
Token decoder decodes a JWT correctly.
def test_decode_token(token):
    payload = User.decode_auth_token(token)
    user = User.find_by_id(payload.get('id'))
    assert isinstance(user, User) is True
    assert user.email == 'adminuser@test.com'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode_token(token):\n decoded_token = jwt.decode(token, secret_key, algorithms=['HS256'])\n return decoded_token", "def decode(encoded_token):\n return jwt.decode(encoded_token, key=settings.JWT_AUTH['JWT_SECRET_KEY'])", "def decodeJWT(self, token):\n try:\n return jwt.decode(token, self.secret, algorithms=[self.algorithm])\n except jwt.exceptions.InvalidSignatureError:\n raise ValueError(f'The following JWT is invalid: {token}')", "def decode_token(token):\n\n return jwt.decode(\n token, settings.JWT_SECRET, algorithms=[settings.JWT_ALGO])", "def decode(token):\n return jwt.decode(token, app.config[\"JWT_SECRET\"], algorithms=[\"HS256\"])", "def decode_token(token):\n payload = None\n try:\n payload = jwt.decode(token.encode('utf-8'), '1$Arh\"1bWa/7+OS', algorithm='HS256')['u_id']\n except jwt.InvalidTokenError:\n pass\n return payload", "def decode_token(token, options=JWT_OPTIONS):\n return jwt.decode(\n token,\n SECRET_KEY,\n issuer=JWT_ISSUER,\n audience=JWT_AUDIENCE,\n options=options,\n algorithms=(JWT_OPTIONS_ALGORITHM,)\n )", "def parse_token(token):\n return jwt.decode(token, app.config['JWT_SECRET'])", "def decode_auth_token(auth_token, config):\n secret_key = config['JWT_SECRET_KEY']\n try:\n payload = jwt.decode(auth_token, secret_key)\n return payload['sub']\n except jwt.ExpiredSignatureError as error:\n raise ExpiredToken from error\n except (jwt.InvalidTokenError, jwt.DecodeError) as error:\n raise InvalidToken from error", "def decode_payload(encoded_payload):\n jwt_secret = app.config['SECRET_KEY']\n payload = jwt.decode(encoded_payload, jwt_secret, algorithms='HS256')\n\n return payload", "def decode_token(token):\n try:\n payload = jwt.decode(\n token, app.config.get('SECRET_KEY'), algorithms='HS256')\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return \"Expired token. Please login to get a new token\"\n except jwt.InvalidTokenError:\n return \"Invalid token. Please register or login\"", "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, Config.SECRET_KEY,algorithms='HS256')\n return payload\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "def decode_token(token):\n try:\n # Decode token with our secret key\n payload = jwt.decode(token, SECRET_KEY)\n return payload['sub']\n except jwt.ExpiredSignatureError:\n # token has expired\n return \"Timed out. Please login to get a new token\"\n except jwt.InvalidTokenError:\n return \"Invalid token. Please register or login\"", "def decode_jwt(encoded, key, algorithms = 'HS256'):\n try:\n payload = jwt.decode(\n encoded, \n key, \n algorithms = algorithms\n )\n\n return payload\n # if token has expired:\n except jwt.exceptions.ExpiredSignatureError:\n raise JWTError(\n {\n 'code': 'token_expired',\n 'description': 'Token expired.'\n }, \n 401\n )\n # other exceptions:\n except Exception:\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Unable to parse authentication token.'\n }, \n 400\n )", "def decode_auth_token(secret_key, auth_token):\n try:\n payload = jwt.decode(auth_token, secret_key) \n is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return 'Token blacklisted. Please log in again.' \n else: \n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. 
Please log in again.'", "def decode_token(token):\n try:\n # try to decode the token using our SECRET variable\n payload = jwt.decode(token, app.config.get('SECRET_KEY'), algorithms=['HS256'])\n return payload['sub']\n except jwt.ExpiredSignatureError:\n # the token is expired, return an error string\n return \"Expired token. Please login to get a new token\"\n except jwt.InvalidTokenError:\n # the token is invalid, return an error string\n return \"Invalid token. Please register or login\"", "def decode_auth_token(auth_token): \n try: \n payload = jwt.decode(auth_token, getattr(settings, \"SECRET_KEY\", \"\"),algorithms=['HS256']) \n is_blacklisted_token = User.check_blacklist(auth_token)\n if is_blacklisted_token:\n return False,'Token blacklisted. Please log in again.'\n else:\n return True, payload['sub']\n except jwt.ExpiredSignatureError:\n return False,'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return False,'Invalid token. Please log in again.'", "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))\n\n # is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n # if is_blacklisted_token:\n # return 'Token blacklisted. Please log in again.'\n # else:\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "def decode_request(self, data):\n return decode_jwt(data[\"jwt\"], data[\"cert_name\"], self.node.node_name, self.node.id)", "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, key)\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "def decode_auth_token(auth_token):\n if len(auth_token) != 139:\n return \"Invalid token. Please log in again.\"\n try:\n payload = jwt.decode(auth_token, key)\n is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return 'Token blacklisted. Please log in again.'\n else:\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "def decode_token(token):\n try:\n # try to decode the token using our SECRET variable\n payload = jwt.decode(token, os.environ.get('SECRET', 'test'))\n return \"\", payload['sub']\n except jwt.ExpiredSignatureError:\n # the token is expired, return an error string\n return \"Expired token. Please login to get a new token\", None\n except jwt.InvalidTokenError:\n # the token is invalid, return an error string\n return \"Invalid token. 
Please register or login\", None", "async def decode(self, jwt_token: str, verify=True) -> dict:\n try:\n jwt_token = jwt_token.replace(f\"{self.auth_schema} \", \"\")\n payload = jwt.decode(\n jwt_token,\n self.jwt_secret,\n algorithms=(self.jwt_algorithm,),\n options={\"verify_exp\": verify},\n )\n\n return payload\n\n except jwt.DecodeError:\n raise InvalidTokenException()\n\n except jwt.ExpiredSignatureError:\n raise TokenExpiredException()", "def decodeAuthToken(authToken):\n try:\n return jwt.decode(authToken, current_app.config['SECRET_KEY'], algorithm='HS256')['sub']\n except jwt.ExpiredSignatureError:\n return 'signature expired, Please login again'\n except jwt.InvalidTokenError:\n return 'Invalid token'", "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, Config.SECRET_KEY,algorithms='HS256')\n return payload['role']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "def decode_token(token: str):\n try:\n decoded = b64decode(token.encode())\n key_data = orjson.loads(decoded)\n timestamp = int(key_data['t'])\n pub_key = key_data['p']\n signature = key_data['s']\n except (ValueError, TypeError, KeyError, orjson.JSONDecodeError, binascii.Error) as e:\n logging.debug(\"Invalid token format: %s\", token)\n raise HTTPException(status_code=403, detail=\"Invalid token\") from e\n\n if timestamp > time.time() or timestamp < time.time() - TOKEN_EXPIRE_INTERVAL:\n raise HTTPException(status_code=403, detail=\"Token expired\")\n\n try:\n check_signature(\n ''.join([pub_key, str(timestamp)]),\n signature,\n pub_key\n )\n except InvalidSignature as e:\n logging.error(\"Invalid token signature. Might be access violation.\")\n raise HTTPException(status_code=403, detail=\"Invalid token\") from e\n\n return pub_key", "def decode_jwt(self, token):\n key = self.master_secret\n public_key = self.public_key\n if self.public_key_file is not None:\n with open(self.public_key_file, 'r') as rsa_pub_file:\n public_key = rsa_pub_file.read()\n if public_key is not None:\n key = public_key\n if self.leeway is not None:\n leeway = self.leeway\n else:\n leeway = 0\n options = {\n 'verify_exp': self.verify_expiration,\n }\n try:\n claims_set = jwt.decode(\n token,\n key,\n options=options,\n leeway=leeway,\n issuer=self.issuer\n )\n except (jwt.DecodeError, jwt.ExpiredSignature):\n return None\n return claims_set", "def get_payload(cls, token):\n \n secret = cls.secret\n algo = cls.algo\n decoded = jwt.decode(token, secret, algo)\n return decoded", "def _decode(token):\n if token is None:\n return None\n # Pad the token out to be divisible by 4.\n padded_token = bytes(token, 'utf8') + '='.encode() * (4 - (len(token) % 4))\n decoded_token = base64.urlsafe_b64decode(padded_token)\n token_dict = json.loads(decoded_token)\n if not token_dict or not isinstance(token_dict, dict):\n raise ValueError('Invalid pagination token: {}').format(token_dict)\n return token_dict", "def verify_jwt(token):\n return jwt.decode(token.encode(), SECRET_KEY)", "async def validate_token(self, token: bytes, audience=None) -> Dict[str, str]:\n\n try:\n header = jwt.get_unverified_header(token)\n if \"kid\" not in header:\n raise InvalidToken(\"Missing kid in header\")\n return jwt.decode(token, await self.retrieve_public_key(self._decode_public_key_identifier(header[\"kid\"])), algorithms='RS256', issuer=tedious.config.CONFIG[\"TOKEN\"][\"issuer\"], audience=audience)\n except DecodeError:\n 
raise InvalidToken(\"Unable to decode token.\")\n except Exception as e:\n raise InvalidToken(str(type(e)) + \" \" + str(e))", "def decode(self, token, verify=True):\n try:\n return jwt.decode(\n token,\n self.get_verifying_key(token),\n algorithms=[self.algorithm],\n audience=self.audience,\n issuer=self.issuer,\n leeway=self.leeway,\n options={\n 'verify_aud': self.audience is not None,\n 'verify_signature': verify,\n },\n )\n except InvalidAlgorithmError as ex:\n raise TokenBackendError(_('Invalid algorithm specified')) from ex\n except InvalidTokenError:\n raise TokenBackendError(_('Token is invalid or expired'))", "def decode_token(self, token: str, max_age: int) -> Optional[object]:\n try:\n return self.serializer.loads(token, max_age)\n except (BadSignature, SignatureExpired) as e:\n return None", "def get_token(self):\n\n try:\n return jwt.decode(self.fetch_token(), KEY, algorithms=['HS256'])\n except jwt.exceptions.DecodeError:\n raise InvalidToken", "def parse_token(req):\n auth_string_list = req.headers.get('Authorization').split()\n # Check in correct format i.e. Bearer: 39xds03lda0...\n if len(auth_string_list) == 1:\n raise ValueError('Authorization has invalid format')\n else:\n token = auth_string_list[1]\n data = jwt.decode(token, config.SECRET_KEY, algorithms='HS256')\n return data", "def decode(encoded):\n if encoded is None:\n return None\n\n try:\n s = decode(APP.config['SECRET_KEY'], encoded)\n return json.loads(s)\n except Exception as err:\n LOGGER.error('Error decoding auth: %s' % str(err))\n raise err", "def _verified_token(self,encoded_token: bytes) -> Dict[str,Union[str,int,bool]]:\n try:\n return jwt.decode(encoded_token,self._SECRET_KEY,algorithms=self._ALGORITHM)\n except jwt.ExpiredSignatureError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.DecodeError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidAlgorithmError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidKeyError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidTokenError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidIssuerError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidAudienceError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidIssuedAtError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidSignatureError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.ImmatureSignatureError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.MissingRequiredClaimError as err:\n raise HTTPException(status_code=422,detail=str(err))", "def verify_decode_jwt(token):\n jsonurl = urlopen(f'https://{AUTH0_DOMAIN}/.well-known/jwks.json')\n jwks = json.loads(jsonurl.read())\n\n unverified_header = jwt.get_unverified_header(token)\n\n if 'kid' not in unverified_header:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Header of token must contain key id.'\n }, 401)\n\n rsa_key = {}\n for key in jwks['keys']:\n if key['kid'] == unverified_header['kid']:\n rsa_key = {\n 'kty': key['kty'],\n 'kid': key['kid'],\n 'use': key['use'],\n 'n': key['n'],\n 'e': key['e']\n }\n\n if not rsa_key:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Unable to find appropriate key for token.'\n }, 401)\n\n try:\n payload = jwt.decode(\n token,\n rsa_key,\n algorithms=ALGORITHMS,\n 
audience=API_AUDIENCE,\n issuer='https://' + AUTH0_DOMAIN + '/'\n )\n return payload\n except jwt.ExpiredSignatureError:\n raise AuthError({\n 'code': 'token_expired',\n 'description': 'Token expired.'\n }, 401)\n except jwt.JWTClaimsError:\n raise AuthError({\n 'code': 'invalid_claims',\n 'description': 'Incorrect claims. Please check the audience and issuer.'\n }, 401)\n except Exception:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Unable to parse token.'\n }, 401)", "def decode_auth_token(auth_token: str) -> Union[str, int]:\n try:\n payload = jwt.decode(auth_token, key, algorithms='HS256')\n \n user=Usuario.query.filter_by(id=payload['uid']).first()\n is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return 'Conta deslogada. Por favor realizar o login novamente.'\n elif user.ativo!=True:\n return 'Conta invativa. Por favor entrar em contato com o administrador.'\n else:\n return payload['uid']\n except jwt.ExpiredSignatureError:\n return 'Token expirado. Por favor realizar o login novamente.'\n except jwt.InvalidTokenError:\n return 'Token inválido. Por favor realizar o login novamente.'", "def test_decode_token_invalid(token):\n payload = User.decode_auth_token(f'{token}1337')\n assert isinstance(payload, User) is False\n assert 'Invalid token' in payload", "def test_decode_IQ_token(self):\n\n token = \"\"\"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJuYW1laWQiOiJhZGZzfHNodzAwMXNhaW50ZWxzZXdoZXJlfGpwX2FkbWluQHVybjphdXRoMDpzYWludGVsc2V3aGVyZSIsImVtYWlsIjoiSmFpbWluLlBhdGVsODMrNTE2NDU2QGdtYWlsLmNvbSIsInVuaXF1ZV9uYW1lIjoiSVFHRU5IT1NQXFxiXy1kcHl4eDBFeVVjR0pIaG1aOCIsImh0dHBzOi8vdGVsZXRyYWNraW5nLmNsb3VkYXBwLm5ldC9pZGVudGl0eS9jbGFpbXMvYWR1c2VyZ3VpZCI6IjMveFFhZ0VrSWttcllBU0VQZHVZRmc9PSIsImh0dHBzOi8vdGVsZXRyYWNraW5nLmNsb3VkYXBwLm5ldC9pZGVudGl0eS9jbGFpbXMvZmlyc3RuYW1lIjoiQWRtaW4iLCJodHRwczovL3RlbGV0cmFja2luZy5jbG91ZGFwcC5uZXQvaWRlbnRpdHkvY2xhaW1zL2xhc3RuYW1lIjoiVGVzdCIsImh0dHBzOi8vdGVsZXRyYWNraW5nLmNsb3VkYXBwLm5ldC9pZGVudGl0eS9jbGFpbXMvb3VuYW1lIjoiU2FpbnRFbHNld2hlcmUiLCJyb2xlIjpbIkRvbWFpbiBVc2VycyIsIkFkbWluaXN0cmF0b3IiLCJJUUdlbkhvc3BTZWMiLCJTYWludEVsc2V3aGVyZSJdLCJ1cG4iOiJKYWltaW4uUGF0ZWw4Mys1MTY0NTZAZ21haWwuY29tIiwiaHR0cDovL3NjaGVtYXMuYXV0aDAuY29tL2lkZW50aXRpZXMvZGVmYXVsdC9wcm92aWRlciI6ImFkZnMiLCJodHRwOi8vc2NoZW1hcy5hdXRoMC5jb20vaWRlbnRpdGllcy9kZWZhdWx0L2Nvbm5lY3Rpb24iOiJzaHcwMDFzYWludGVsc2V3aGVyZSIsImh0dHA6Ly9zY2hlbWFzLmF1dGgwLmNvbS9pZGVudGl0aWVzL2RlZmF1bHQvaXNTb2NpYWwiOiJmYWxzZSIsImh0dHA6Ly9zY2hlbWFzLmF1dGgwLmNvbS9naXZlbl9uYW1lIjoiSVFHRU5IT1NQXFxiXy1kcHl4eDBFeVVjR0pIaG1aOCIsImh0dHA6Ly9zY2hlbWFzLmF1dGgwLmNvbS9waWN0dXJlIjoiaHR0cHM6Ly9zLmdyYXZhdGFyLmNvbS9hdmF0YXIvMzUxYTRiMjU4NWViM2UyYjA1NWI4ZTAyOGY4NzdmNDc_cz00ODBcdTAwMjZyPXBnXHUwMDI2ZD1odHRwcyUzQSUyRiUyRmNkbi5hdXRoMC5jb20lMkZhdmF0YXJzJTJGaXEucG5nIiwiaHR0cDovL3NjaGVtYXMuYXV0aDAuY29tL25pY2tuYW1lIjoiSmFpbWluLlBhdGVsODMrNTE2NDU2IiwiaHR0cDovL3NjaGVtYXMuYXV0aDAuY29tL2VtYWlsX3ZlcmlmaWVkIjoidHJ1ZSIsImh0dHA6Ly9zY2hlbWFzLmF1dGgwLmNvbS9jbGllbnRJRCI6Imtrakgxd3AzdE53RmpEN0M1djI3a0oyWHFWUHE1akhtIiwiaHR0cDovL3NjaGVtYXMuYXV0aDAuY29tL3VwZGF0ZWRfYXQiOiJNb24gSmFuIDE0IDIwMTkgMTU6NTY6MTIgR01UKzAwMDAgKFVUQykiLCJodHRwOi8vc2NoZW1hcy5hdXRoMC5jb20vY3JlYXRlZF9hdCI6IkZyaSBKYW4gMTEgMjAxOSAyMDoxNToyMiBHTVQrMDAwMCAoVVRDKSIsImF1dGhtZXRob2QiOiJodHRwOi8vc2NoZW1hcy5taWNyb3NvZnQuY29tL3dzLzIwMDgvMDYvaWRlbnRpdHkvYXV0aGVudGljYXRpb25tZXRob2QvcGFzc3dvcmQiLCJhdXRoX3RpbWUiOiIyMDE5LTAxLTE0VDIzOjU2OjEyLjg1M1oiLCJodHRwczovL3RlbGV0cmFja2luZy5jbG91ZGFwcC5uZXQvaWRlbnRpdHkvY2xhaW1zL3RlbmFudGlkIjoiMjExNmU5NDMtNTA5NC00MWY3LTgzMTgtODNhY
WMyYWMxMTQ3IiwiaHR0cHM6Ly90ZWxldHJhY2tpbmcuY2xvdWRhcHAubmV0L2lkZW50aXR5L2NsYWltcy9jb250ZXh0cGVyc29uaWQiOiIwYTAxMjBhMS04NTU3LTQ4MzEtYTQyNi1hOGJkMDBmNjFkYzkiLCJodHRwczovL3RlbGV0cmFja2luZy5jbG91ZGFwcC5uZXQvaWRlbnRpdHkvY2xhaW1zL3VzZXJuYW1lZm9ybWFsIjoiVGVzdCwgQWRtaW4iLCJodHRwczovL3RlbGV0cmFja2luZy5jbG91ZGFwcC5uZXQvaWRlbnRpdHkvY2xhaW1zL3VzZXJuYW1laW5mb3JtYWwiOiJBZG1pbiBUZXN0IiwiaHR0cHM6Ly90ZWxldHJhY2tpbmcuY2xvdWRhcHAubmV0L2lkZW50aXR5L2NsYWltcy91c2VySWQiOiI0ZmU5OTdmZC00ZGNkLTQxNWItYjJjYi1hOGJkMDBmNjFkYzkiLCJodHRwczovL3RlbGV0cmFja2luZy5jbG91ZGFwcC5uZXQvaWRlbnRpdHkvY2xhaW1zL2ZlYXR1cmV0eXBlaWQiOlsiNCIsIjIiLCIxIiwiMyIsIjUiLCI2Il0sImlzcyI6InRlbGV0cmFja2luZy5jb20iLCJhdWQiOiJodHRwOi8vd3d3LnNlcnZpY2UudGVsZXRyYWNraW5nLmNvbS8iLCJleHAiOjE1NTAwNzM0MzksIm5iZiI6MTU0NzQ4MTQzOX0.UCL-Wc3OSVDI58U5ShOYqLa-DwNc_WQ3BlY5P3CfnVI\"\"\"\n audience = 'http://www.service.teletracking.com/'\n\n secret = 'drMemxWrLen6fCXQA5jO6gXkK/UoZVzPGRDiff7ByPU='\n decoded_token = AuthenticationHandler.validate_and_decode_token(\n token=token, key=secret,\n audience=audience\n )\n self.assertTrue(decoded_token['role'][0] == 'Domain Users', \"Group 1 not match\")\n self.assertTrue(decoded_token['role'][1] == 'Administrator', \"Group 2 not match\")", "def test_encode_decode_token(create_user):\n user = create_user\n user_data = {\n \"email\": user.email,\n \"username\": user.username\n }\n jwt = JWTAuthentication()\n # encode token\n encoded_token = jwt.generate_token(user_data)\n assert type(encoded_token) is str # test encoding is 'utf-8'\n # decode token\n user_details = jwt.decode_token(encoded_token)\n assert user_details['userdata'] == user_data # test token details", "def validate_token(self, data):\n try:\n payload = jwt.decode(data, settings.SECRET_KEY, algorithm=['HS256'])\n except jwt.ExpiredSignatureError:\n raise serializers.ValidationError('Verification link has expired')\n except jwt.exceptions.PyJWTError:\n raise serializers.ValidationError('Invalidad token')\n if payload['type'] != 'email_confirmation':\n raise serializers.ValidationError('Invalid token')\n self.context['payload'] = payload\n return data", "def validate(self, data):\n try:\n payload = jwt.decode(data['token'], settings.SECRET_KEY, algorithms=['HS256'])\n except ExpiredSignatureError:\n raise serializers.ValidationError(\"The token has expired.\")\n except JWTError:\n raise serializers.ValidationError(\"Error validating token. 
Ensure is the right token.\")\n\n self.context['payload'] = payload\n return data", "def decode_token_appengine(credentials, token, verify=False):\n return _decode_token(credentials, token, False)", "def decode_jwt_refresh_token(\n encoded_refresh_token: str,\n verify_exp: bool = True) -> Dict:\n\n return jwt.decode(\n encoded_refresh_token,\n key=cfg.jwt_secret,\n algorithms=[cfg.jwt_algorithm],\n options={\"verify_exp\": verify_exp})", "def test_decode_token():\n pass", "def decode(\n self,\n keys: Union[KeyInterface, List[KeyInterface]],\n token: Union[bytes, str],\n implicit_assertion: Union[bytes, str] = b\"\",\n deserializer: Optional[Any] = None,\n aud: str = \"\",\n ) -> Token:\n\n if deserializer:\n try:\n if not callable(deserializer.loads):\n raise ValueError(\"deserializer should have loads().\")\n except AttributeError:\n raise ValueError(\"deserializer should have loads().\")\n except Exception:\n raise\n\n keys = keys if isinstance(keys, list) else [keys]\n bi = implicit_assertion if isinstance(implicit_assertion, bytes) else implicit_assertion.encode(\"utf-8\")\n\n failed = None\n t = Token.new(token)\n for k in keys:\n if k.header != t.header:\n continue\n try:\n if k.purpose == \"local\":\n t.payload = k.decrypt(t.payload, t.footer, bi)\n else:\n t.payload = k.verify(t.payload, t.footer, bi)\n try:\n if deserializer:\n t.payload = deserializer.loads(t.payload)\n except Exception as err:\n raise ValueError(\"Failed to deserialize the payload.\") from err\n if deserializer:\n try:\n if t.footer:\n t.footer = deserializer.loads(t.footer)\n except Exception:\n pass\n self._verify_registered_claims(t.payload, aud)\n return t\n except Exception as err:\n failed = err\n if failed:\n raise failed\n raise ValueError(\"key is not found for verifying the token.\")", "def verify_jwt(self, token: str):\n try:\n unverified_token = jwt.decode(token, verify=False)\n except DecodeError:\n logger.warning(f\"Failed to decode JWT without verification: {token}\", exc_info=True)\n raise NonDecodableTokenError(token)\n\n try:\n issuer = unverified_token['iss']\n except KeyError:\n raise InvalidTokenError(token)\n\n if not self.is_valid_issuer(issuer):\n logger.warning(f\"Detected a JWT with UNKNOWN ISSUER. 
({issuer})\", exc_info=True)\n raise InvalidTokenError(token)\n\n public_keys = self.get_public_keys(issuer)\n token_header = jwt.get_unverified_header(token)\n\n try:\n public_key_id = token_header[\"kid\"]\n except KeyError:\n raise InvalidTokenError(token)\n\n public_key = public_keys[public_key_id]\n verification_options = dict(key=public_key,\n issuer=issuer,\n audience=config.access_token_audience_list,\n algorithms=('RS256',))\n\n try:\n return jwt.decode(token, **verification_options)\n except PyJWTError:\n logger.warning('Detected a JWT with INVALID SIGNATURE.', exc_info=True)\n raise InvalidTokenError(token)", "def validate_token(self, data):\n try:\n payload = jwt.decode(data, settings.SECRET_KEY, algorithms=['HS256'])\n except jwt.ExpiredSignatureError:\n raise serializers.ValidationError('Verification link has expired.')\n except jwt.PyJWTError:\n raise serializers.ValidationError('Invalid token')\n if payload['type'] != 'email_confirmation':\n raise serializers.ValidationError('Invalid token')\n\n self.context['payload'] = payload\n return data", "def validate_token(self, data):\n try:\n payload = jwt.decode(data, settings.SECRET_KEY, algorithms=['HS256'])\n except jwt.ExpiredSignatureError:\n raise serializers.ValidationError('Verification link has expired')\n except jwt.PyJWTError:\n raise serializers.ValidationError('Invalid token.')\n\n if payload['type'] != 'email_confirmation':\n raise serializers.ValidationError('Invalid token.')\n\n self.context['payload'] = payload\n return data", "def decode(self, encoded):", "def test_jwt_example(self):\n data = r'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c'\n expected = json.loads(r'''{\"header\":{\"alg\":\"HS256\",\"typ\":\"JWT\"},\"payload\":{\"sub\":\"1234567890\",\"name\":\"John Doe\",\"iat\":1516239022},\"signature\":\"49:f9:4a:c7:04:49:48:c7:8a:28:5d:90:4f:87:f0:a4:c7:89:7f:7e:8f:3a:4e:b2:25:5f:da:75:0b:2c:c3:97\"}''')\n self.assertEqual(jc.parsers.jwt.parse(data, quiet=True), expected)", "def validate_jwt_svid(self, token: str, audience: str) -> JwtSvid:", "def test_decoded_jwt_no_jwt_provided(\n self,\n mixin,\n mrequest,\n mdecode_jwt,\n ):\n with raises(HTTPUnauthorized):\n mixin.decoded_jwt()", "def unserialize(cls, string, secret_key, algorithm='HS256'):\n if isinstance(string, text_type):\n string = string.encode('utf-8', 'replace')\n if isinstance(secret_key, text_type):\n secret_key = secret_key.encode('utf-8', 'replace')\n\n items = jwt.decode(string, secret_key, algorithms=[algorithm])\n return cls(items, secret_key, algorithm)", "def process_jwt_token(response):\n if response.status_code == HTTPStatus.OK and current_identity:\n response.headers['new_jwt'] = '{0}'.format(\n str(__encode_token().decode('utf-8'))\n )\n\n return response", "def decode(data): #@NoSelf", "def decode_request(self, data, callback):\n _log.debug(\"decode_request:\\n\\tdata={}\\n\\tcallback={}\".format(data, callback))\n decode_jwt(data[\"jwt\"], data[\"cert_name\"], self.node,\n callback=CalvinCB(self._decode_request_cb,\n callback=callback))", "def jwt_response_payload_handler(token, user=None, request=None):\n return {\n 'token': token\n }", "def test_decoded_jwt_proper_jwt_provided(\n self,\n mixin,\n mrequest,\n mdecode_jwt,\n ):\n mrequest.headers['JWT'] = 'fake-jwt'\n\n assert mixin.decoded_jwt() == mdecode_jwt.return_value\n\n mdecode_jwt.assert_called_once_with('fake-jwt')", "def decodeAccesshTokenForRefreshToken( 
accessToken):\n try:\n payload = jwt.decode(accessToken, ApiJWTAuthentication.secretKey_access)\n return {\"message\": \"success\",\"refresh_token\": payload['refresh_token']}\n except jwt.ExpiredSignatureError:\n return {\"message\": \"Expired Access Token\"}\n except jwt.InvalidTokenError:\n return {\"message\": \"Invalid access Token\"}", "def decoder(self):\n pass", "def token(self):\n \n payload = {\n 'sub_type': self.sub_type,\n '_hash' : self._hash,\n 'jti' : str(uuid.uuid4())\n }\n return jwt.encode(payload, self.secret, self.algo).decode('utf-8')", "def jwt_token_verify(auth_header):\n # Hug do not extract Bearer prefix\n auth_token, payload = parse_header(auth_header)\n return payload", "def validate(cls, token):\n if not cls.JWT_REGEX.match(token):\n raise ValueError('Invalid JWT token')\n\n return token", "def get_token(self, obj):\n jwt_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER\n jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n\n payload = jwt_payload_handler(obj)\n token = jwt_encode_handler(payload)\n\n return token", "def deserialize(token):\n\n if token.type == TYPE_BOOLEAN:\n return _to_boolean(token)\n elif token.type == TYPE_INTEGER:\n return _to_int(token)\n elif token.type == TYPE_FLOAT:\n return _to_float(token)\n elif token.type == TYPE_DATE:\n return _to_date(token)\n elif token.type in (TYPE_STRING, TYPE_MULTILINE_STRING, TYPE_BARE_STRING,\n TYPE_LITERAL_STRING, TYPE_MULTILINE_LITERAL_STRING):\n return _to_string(token)\n else:\n raise Exception('This should never happen!')", "def decode(self, crypto):", "def isValid(token):\n try:\n decoded = jwt.decode(token, SECRET_KEY)\n return True\n except:\n return False", "def parse_jwt_guest_token(self, raw_token: str) -> Dict[str, Any]:\n secret = current_app.config[\"GUEST_TOKEN_JWT_SECRET\"]\n algo = current_app.config[\"GUEST_TOKEN_JWT_ALGO\"]\n audience = self._get_guest_token_jwt_audience()\n return self.pyjwt_for_guest_token.decode(\n raw_token, secret, algorithms=[algo], audience=audience\n )", "def parse_and_validate(\n cls, token: str, jwt_bundle: JwtBundle, audience: List[str]\n ) -> 'JwtSvid':\n if not token:\n raise ArgumentError(INVALID_INPUT_ERROR.format('token cannot be empty'))\n\n if not jwt_bundle:\n raise ArgumentError(\n INVALID_INPUT_ERROR.format('jwt_bundle cannot be empty')\n )\n try:\n header_params = jwt.get_unverified_header(token)\n validator = JwtSvidValidator()\n validator.validate_header(header_params)\n key_id = header_params.get('kid')\n signing_key = jwt_bundle.get_jwt_authority(key_id)\n if not signing_key:\n raise AuthorityNotFoundError(key_id)\n\n public_key_pem = signing_key.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo,\n ).decode('UTF-8')\n\n claims = jwt.decode(\n token,\n algorithms=header_params.get('alg'),\n key=public_key_pem,\n audience=audience,\n options={\n 'verify_signature': True,\n 'verify_aud': True,\n 'verify_exp': True,\n },\n )\n\n spiffe_id = SpiffeId.parse(claims.get('sub', None))\n\n return JwtSvid(spiffe_id, claims['aud'], claims['exp'], claims, token)\n except PyJWTError as err:\n raise InvalidTokenError(str(err))\n except ArgumentError as value_err:\n raise InvalidTokenError(str(value_err))", "def test_jwt_login_json(self):\n response = self.client.post(\n '/auth-token/',\n json.dumps(self.data),\n content_type='application/json'\n )\n\n response_content = json.loads(smart_text(response.content))\n\n decoded_payload = utils.jwt_decode_handler(response_content['token'])\n\n 
self.assertEqual(response.status_code, 200)\n self.assertEqual(decoded_payload['username'], self.username)", "def getJWTtoken(self):\n\n token = False\n try:\n res = self.s.get(self.url + 'tokens/jwt', auth=(self.username, self.password), verify=False)\n res.raise_for_status()\n except:\n logger.error(res)\n raise\n token = vsdModels.Token(**res.json())\n try:\n payload = jwt.decode(token.tokenValue, verify=False)\n\n except jwt.InvalidTokenError as e:\n logger.error('token invalid, try using Basic Auth{0}'.format(e))\n raise\n\n return token", "def decode_token_service_key(credentials, token, verify=True):\n return _decode_token(credentials, token, verify)", "def encode(self, payload):\n jwt_payload = payload.copy()\n if self.audience is not None:\n jwt_payload['aud'] = self.audience\n if self.issuer is not None:\n jwt_payload['iss'] = self.issuer\n\n token = jwt.encode(jwt_payload, self.signing_key, algorithm=self.algorithm)\n if isinstance(token, bytes):\n # For PyJWT <= 1.7.1\n return token.decode('utf-8')\n # For PyJWT >= 2.0.0a1\n return token", "def decode_expiry_value(byte_iter):\n value_length = MMSDecoder.decode_value_length(byte_iter)\n token = byte_iter.next()\n\n if token == 0x80: # Absolute-token\n return MMSDecoder.decode_date_value(byte_iter)\n elif token == 0x81: # Relative-token\n return MMSDecoder.decode_delta_seconds_value(byte_iter)\n\n raise wsp_pdu.DecodeError('Unrecognized token value: %s' % hex(token))", "def b64_json_dec(encoded):\n json_str = base64.b64decode(encoded).decode()\n return json.loads(json_str)", "def test_destructure_decrypt_decode_verbose(self):\n encrypted = self.obj.dumps(SESSION).encode('utf-8')\n\n dec = urlsafe_b64decode(encrypted)\n key = crypto_generichash(app.secret_key, outlen=crypto_secretbox_KEYBYTES)\n n = dec[:crypto_secretbox_NONCEBYTES]\n c = dec[crypto_secretbox_NONCEBYTES:]\n m = crypto_secretbox_open(c, n, key)\n\n session = self.obj.serializer.loads(m)\n assert session[self.obj.timestamp_key]\n del session[self.obj.timestamp_key]\n assert SESSION == session", "def jwt_response_payload_handler(token, user=None, request=None):\n return {\n\n \"token\": JWT_AUTH.get('JWT_AUTH_HEADER_PREFIX') + ' ' + token,\n \"expires_in\": datetime.utcnow() + api_settings.JWT_EXPIRATION_DELTA,\n \"user_info\": UsersLoginSerializer(user, context={'request': request}).data\n\n }", "def json_decode(json_str):\n return JSON_DECODER.decode(json_str)", "def validate_token(token):\n # first, decode the token data to determine the tenant associated with the token. 
We are not able to\n # check the signature until we know which tenant, and thus, which public key, to use for validation.\n try:\n data = jwt.decode(token, verify=False)\n except Exception as e:\n logger.debug(f\"got exception trying to parse data from the access_token jwt; exception: {e}\")\n raise errors.AuthenticationError(\"could not parse the access token.\")\n # get the tenant out of the jwt payload and get associated public key\n token_tenant_id = data['tenant_id']\n try:\n public_key_str = get_tenant_config(token_tenant_id)['public_key']\n except errors.BaseTapisError:\n raise errors.AuthenticationError(\"Unable to process Tapis token; unexpected tenant_id.\")\n except KeyError:\n raise errors.AuthenticationError(\"Unable to process Tapis token; no public key associated with the \"\n \"tenant_id.\")\n # try:\n # pub_key = get_pub_rsa_key(public_key_str)\n # except Exception as e:\n # logger.error(f\"got exception trying to create public RSA key object; e: {e} \")\n # raise errors.ServiceConfigError(\"Unable to process public key associated with tenant.\")\n try:\n return jwt.decode(token, public_key_str, algorithm='RS256')\n except Exception as e:\n logger.debug(f\"Got exception trying to decode token; exception: {e}\")\n raise errors.AuthenticationError(\"Invalid Tapis token.\")", "def _create_token(self, payload, key):\n return jwt.encode(payload, key, algorithm='RS256')", "def _generate_jwt_token(self):\n payload = jwt_payload_handler(self)\n token = jwt_encode_handler(payload)\n return token", "def get_jwt():\n\n try:\n scheme, token = request.headers['Authorization'].split()\n assert scheme.lower() == 'basic'\n return base64.b64decode(token).decode(\"UTF-8\")\n except (KeyError, ValueError, AssertionError):\n raise Forbidden('Invalid Bearer Token.')", "def decrypt_jwe(token: str, pri_key: jwk.JWK) -> Tuple[dict, dict, jwk.JWK]:\n jwe_obj = jwe.JWE()\n jwe_obj.deserialize(\n token,\n key=pri_key\n )\n cek = extract_cek(jwe_obj)\n\n return jwe_obj.jose_header, json.loads(jwe_obj.payload), cek", "def processResponse(token, enc_key, sig_key):\n payload = []\n # Decrypt encrypted token (JWE).\n enc = jwe.JWE()\n enc.deserialize(token, key=enc_key)\n payload.append(enc.payload.decode(\"utf-8\"))\n # This again contains a signed token (JWS), so we deserialize it and verify the signature.\n sig = jws.JWS()\n sig.deserialize(payload[0])\n sig.verify(sig_key)\n payload.append(sig.payload.decode(\"utf-8\"))\n return payload", "def token(self):\n payload = {\n 'id': str(self.id),\n 'username': self.username,\n \"exp\": datetime.now() + timedelta(days=2)\n }\n return jwt.encode(payload, SECRET_KEY).decode('utf-8')", "def parse_insecure(cls, token: str, expected_audience: List[str]) -> 'JwtSvid':\n if not token:\n raise ArgumentError(INVALID_INPUT_ERROR.format('token cannot be empty'))\n try:\n header_params = jwt.get_unverified_header(token)\n validator = JwtSvidValidator()\n validator.validate_header(header_params)\n claims = jwt.decode(token, options={'verify_signature': False})\n validator.validate_claims(claims, expected_audience)\n spiffe_id = SpiffeId.parse(claims['sub'])\n return JwtSvid(spiffe_id, claims['aud'], claims['exp'], claims, token)\n except PyJWTError as err:\n raise InvalidTokenError(str(err))", "def tokens_json(self):\n token_id, secret = self.decoded_token\n token_row = self.unauthenticated_token_row\n tokens_encoded = Fernet(secret).decrypt(\n token_row.tokens_fernet.encode('ascii'))\n return json.loads(tokens_encoded.decode('ascii'))", "def JsonDecode(json_str):\n 
return JSON_DECODER.decode(json_str)", "def check_token(token: str, secret: str | List[str], max_age_seconds: int = 60 * 60 * 24) -> Any:\n return URLSafeTimedSerializer(secret).loads(token, max_age=max_age_seconds, salt=\"token\")", "def deserialize(self, message):\n # Removes the random prefix\n message = message[12:]\n message = message.decode(\"utf-8\")\n\n if self.crypter:\n message = self.crypter.decrypt(message, self.expiry + 10)\n return json.loads(message)", "def decodeRefreshTokenForUserName( refreshToken):\n try:\n payload = jwt.decode(refreshToken, ApiJWTAuthentication.secretKey_Refresh)\n return {\"message\": \"success\",\"email\":payload['email']}\n except jwt.ExpiredSignatureError:\n return {\"message\": \"Expired Refresh Token\"}\n except jwt.InvalidTokenError:\n return {\"message\": \"Invalid Refresh Token\"}", "def _decode_public_key_identifier(identifier):\n\n return JWTAuth._get_identifier_cypher().decrypt(base64.b64decode(identifier)).decode('utf-8')", "def decode_mac_id(self, request, id):\n # There might be multiple secrets in use, if we're in the\n # process of transitioning from one to another. Try each\n # until we find one that works.\n secrets = self._get_token_secrets(request)\n for secret in secrets:\n try:\n data = tokenlib.parse_token(id, secret=secret)\n key = tokenlib.get_token_secret(id, secret=secret)\n break\n except ValueError:\n pass\n else:\n raise ValueError(\"invalid MAC id\")\n return key, data", "def decode(data):\n raise NotImplementedError", "def jwt_response_payload_handler(token, user=None, request=None):\n req = None\n print('jwt_response_payload_handler ', user)\n # Here you can use other serializers or custom logic, it's up to you!\n if isinstance(user, AnonymousUser):\n user = User.objects.get(id=user.id)\n req = {'profile': {'user': user}}\n else:\n req = {'request': request}\n return {\n 'token_decoded': jwt_decode_handler(token),\n 'token': token,\n 'user': UserSerializer(user, context=req).data\n\n }", "def get_token():\n # get authorization header:\n auth = request.headers.get('Authorization', None)\n \n # authorization header should be included:\n if auth is None:\n raise JWTError(\n {\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected.'\n }, \n 401\n )\n \n # authorization header should be 'Bearer [JWT]'\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\".'\n }, \n 401\n )\n elif len(parts) == 1:\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Token not found.'\n }, \n 401\n )\n elif len(parts) > 2:\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Authorization header must be bearer token.'\n }, \n 401\n )\n\n # extract JWT:\n token = parts[1]\n\n return token", "def get_token(public_key,delta):\n return jwt.encode(\n {\n 'public_key':public_key,\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=delta)\n },\n current_app.config['SECRET_KEY'],\n algorithm=\"HS256\"\n )" ]
[ "0.8136295", "0.80802256", "0.80535865", "0.8051602", "0.7912598", "0.76581925", "0.7536502", "0.73548025", "0.7299482", "0.725423", "0.71652126", "0.7159311", "0.71550864", "0.70829904", "0.70726466", "0.70687157", "0.7039805", "0.6997798", "0.69963163", "0.6974989", "0.6951464", "0.6882611", "0.68366355", "0.68307453", "0.6741135", "0.6729666", "0.67238677", "0.66844386", "0.6665566", "0.65361506", "0.6512254", "0.64713746", "0.6440238", "0.64347196", "0.6417669", "0.6344831", "0.63265705", "0.6320189", "0.630741", "0.6296789", "0.61938894", "0.616717", "0.61546224", "0.61464524", "0.6143436", "0.61416006", "0.61326134", "0.6130451", "0.61130005", "0.6106676", "0.60598177", "0.5934586", "0.58717227", "0.5840095", "0.58380175", "0.583328", "0.5832115", "0.5819516", "0.58057815", "0.57876396", "0.57836837", "0.57774746", "0.5773497", "0.57694256", "0.57669437", "0.5737578", "0.56818926", "0.5667761", "0.5665079", "0.5646251", "0.5644077", "0.5635736", "0.5633185", "0.5614865", "0.561125", "0.5606776", "0.5601545", "0.55659944", "0.5560499", "0.55398995", "0.5534257", "0.5534018", "0.55221015", "0.5520275", "0.55192524", "0.5507151", "0.5493835", "0.54932564", "0.54842955", "0.5482646", "0.5474779", "0.54740804", "0.54660195", "0.54602283", "0.54545295", "0.5450582", "0.5442948", "0.5439345", "0.54283655", "0.5428133" ]
0.5956171
51
Token decoder returns 'Invalid token' when it's been tampered with.
def test_decode_token_invalid(token):
    payload = User.decode_auth_token(f'{token}1337')
    assert isinstance(payload, User) is False
    assert 'Invalid token' in payload
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode_token(token):\n decoded_token = jwt.decode(token, secret_key, algorithms=['HS256'])\n return decoded_token", "def decode_token(token):\n\n return jwt.decode(\n token, settings.JWT_SECRET, algorithms=[settings.JWT_ALGO])", "def decode_token(token):\n try:\n # try to decode the token using our SECRET variable\n payload = jwt.decode(token, app.config.get('SECRET_KEY'), algorithms=['HS256'])\n return payload['sub']\n except jwt.ExpiredSignatureError:\n # the token is expired, return an error string\n return \"Expired token. Please login to get a new token\"\n except jwt.InvalidTokenError:\n # the token is invalid, return an error string\n return \"Invalid token. Please register or login\"", "def decode_token(token):\n try:\n # try to decode the token using our SECRET variable\n payload = jwt.decode(token, os.environ.get('SECRET', 'test'))\n return \"\", payload['sub']\n except jwt.ExpiredSignatureError:\n # the token is expired, return an error string\n return \"Expired token. Please login to get a new token\", None\n except jwt.InvalidTokenError:\n # the token is invalid, return an error string\n return \"Invalid token. Please register or login\", None", "def decode_auth_token(secret_key, auth_token):\n try:\n payload = jwt.decode(auth_token, secret_key) \n is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return 'Token blacklisted. Please log in again.' \n else: \n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "def decode(token):\n return jwt.decode(token, app.config[\"JWT_SECRET\"], algorithms=[\"HS256\"])", "def parse_token(token):\n return jwt.decode(token, app.config['JWT_SECRET'])", "def decode_token(token):\n try:\n # Decode token with our secret key\n payload = jwt.decode(token, SECRET_KEY)\n return payload['sub']\n except jwt.ExpiredSignatureError:\n # token has expired\n return \"Timed out. Please login to get a new token\"\n except jwt.InvalidTokenError:\n return \"Invalid token. Please register or login\"", "def decode_token(token):\n try:\n payload = jwt.decode(\n token, app.config.get('SECRET_KEY'), algorithms='HS256')\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return \"Expired token. Please login to get a new token\"\n except jwt.InvalidTokenError:\n return \"Invalid token. Please register or login\"", "def decode_auth_token(auth_token):\n if len(auth_token) != 139:\n return \"Invalid token. Please log in again.\"\n try:\n payload = jwt.decode(auth_token, key)\n is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return 'Token blacklisted. Please log in again.'\n else:\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "def decode(encoded_token):\n return jwt.decode(encoded_token, key=settings.JWT_AUTH['JWT_SECRET_KEY'])", "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, Config.SECRET_KEY,algorithms='HS256')\n return payload\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. 
Please log in again.'", "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))\n\n # is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n # if is_blacklisted_token:\n # return 'Token blacklisted. Please log in again.'\n # else:\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "def decode_auth_token(auth_token): \n try: \n payload = jwt.decode(auth_token, getattr(settings, \"SECRET_KEY\", \"\"),algorithms=['HS256']) \n is_blacklisted_token = User.check_blacklist(auth_token)\n if is_blacklisted_token:\n return False,'Token blacklisted. Please log in again.'\n else:\n return True, payload['sub']\n except jwt.ExpiredSignatureError:\n return False,'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return False,'Invalid token. Please log in again.'", "def decode_token(token: str):\n try:\n decoded = b64decode(token.encode())\n key_data = orjson.loads(decoded)\n timestamp = int(key_data['t'])\n pub_key = key_data['p']\n signature = key_data['s']\n except (ValueError, TypeError, KeyError, orjson.JSONDecodeError, binascii.Error) as e:\n logging.debug(\"Invalid token format: %s\", token)\n raise HTTPException(status_code=403, detail=\"Invalid token\") from e\n\n if timestamp > time.time() or timestamp < time.time() - TOKEN_EXPIRE_INTERVAL:\n raise HTTPException(status_code=403, detail=\"Token expired\")\n\n try:\n check_signature(\n ''.join([pub_key, str(timestamp)]),\n signature,\n pub_key\n )\n except InvalidSignature as e:\n logging.error(\"Invalid token signature. Might be access violation.\")\n raise HTTPException(status_code=403, detail=\"Invalid token\") from e\n\n return pub_key", "def test_decode_token():\n pass", "def decodeJWT(self, token):\n try:\n return jwt.decode(token, self.secret, algorithms=[self.algorithm])\n except jwt.exceptions.InvalidSignatureError:\n raise ValueError(f'The following JWT is invalid: {token}')", "async def validate_token(self, token: bytes, audience=None) -> Dict[str, str]:\n\n try:\n header = jwt.get_unverified_header(token)\n if \"kid\" not in header:\n raise InvalidToken(\"Missing kid in header\")\n return jwt.decode(token, await self.retrieve_public_key(self._decode_public_key_identifier(header[\"kid\"])), algorithms='RS256', issuer=tedious.config.CONFIG[\"TOKEN\"][\"issuer\"], audience=audience)\n except DecodeError:\n raise InvalidToken(\"Unable to decode token.\")\n except Exception as e:\n raise InvalidToken(str(type(e)) + \" \" + str(e))", "def _verified_token(self,encoded_token: bytes) -> Dict[str,Union[str,int,bool]]:\n try:\n return jwt.decode(encoded_token,self._SECRET_KEY,algorithms=self._ALGORITHM)\n except jwt.ExpiredSignatureError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.DecodeError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidAlgorithmError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidKeyError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidTokenError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidIssuerError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidAudienceError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidIssuedAtError as err:\n raise 
HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidSignatureError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.ImmatureSignatureError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.MissingRequiredClaimError as err:\n raise HTTPException(status_code=422,detail=str(err))", "def decode_token(token, options=JWT_OPTIONS):\n return jwt.decode(\n token,\n SECRET_KEY,\n issuer=JWT_ISSUER,\n audience=JWT_AUDIENCE,\n options=options,\n algorithms=(JWT_OPTIONS_ALGORITHM,)\n )", "async def validate_token(self, token):", "def validate_token(self, data):\n try:\n payload = jwt.decode(data, settings.SECRET_KEY, algorithm=['HS256'])\n except jwt.ExpiredSignatureError:\n raise serializers.ValidationError('Verification link has expired')\n except jwt.exceptions.PyJWTError:\n raise serializers.ValidationError('Invalidad token')\n if payload['type'] != 'email_confirmation':\n raise serializers.ValidationError('Invalid token')\n self.context['payload'] = payload\n return data", "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, Config.SECRET_KEY,algorithms='HS256')\n return payload['role']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "def validate_token(self, data):\n try:\n payload = jwt.decode(data, settings.SECRET_KEY, algorithms=['HS256'])\n except jwt.ExpiredSignatureError:\n raise serializers.ValidationError('Verification link has expired.')\n except jwt.PyJWTError:\n raise serializers.ValidationError('Invalid token')\n if payload['type'] != 'email_confirmation':\n raise serializers.ValidationError('Invalid token')\n\n self.context['payload'] = payload\n return data", "def validate_token(self, data):\n try:\n payload = jwt.decode(data, settings.SECRET_KEY, algorithms=['HS256'])\n except jwt.ExpiredSignatureError:\n raise serializers.ValidationError('Verification link has expired')\n except jwt.PyJWTError:\n raise serializers.ValidationError('Invalid token.')\n\n if payload['type'] != 'email_confirmation':\n raise serializers.ValidationError('Invalid token.')\n\n self.context['payload'] = payload\n return data", "def decode_token(token):\n payload = None\n try:\n payload = jwt.decode(token.encode('utf-8'), '1$Arh\"1bWa/7+OS', algorithm='HS256')['u_id']\n except jwt.InvalidTokenError:\n pass\n return payload", "def decode_auth_token(auth_token, config):\n secret_key = config['JWT_SECRET_KEY']\n try:\n payload = jwt.decode(auth_token, secret_key)\n return payload['sub']\n except jwt.ExpiredSignatureError as error:\n raise ExpiredToken from error\n except (jwt.InvalidTokenError, jwt.DecodeError) as error:\n raise InvalidToken from error", "def decodeAuthToken(authToken):\n try:\n return jwt.decode(authToken, current_app.config['SECRET_KEY'], algorithm='HS256')['sub']\n except jwt.ExpiredSignatureError:\n return 'signature expired, Please login again'\n except jwt.InvalidTokenError:\n return 'Invalid token'", "def get_token(self):\n\n try:\n return jwt.decode(self.fetch_token(), KEY, algorithms=['HS256'])\n except jwt.exceptions.DecodeError:\n raise InvalidToken", "def decode_auth_token(auth_token: str) -> Union[str, int]:\n try:\n payload = jwt.decode(auth_token, key, algorithms='HS256')\n \n user=Usuario.query.filter_by(id=payload['uid']).first()\n is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return 'Conta deslogada. 
Por favor realizar o login novamente.'\n elif user.ativo!=True:\n return 'Conta invativa. Por favor entrar em contato com o administrador.'\n else:\n return payload['uid']\n except jwt.ExpiredSignatureError:\n return 'Token expirado. Por favor realizar o login novamente.'\n except jwt.InvalidTokenError:\n return 'Token inválido. Por favor realizar o login novamente.'", "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, key)\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "def validate(self, data):\n try:\n payload = jwt.decode(data['token'], settings.SECRET_KEY, algorithms=['HS256'])\n except ExpiredSignatureError:\n raise serializers.ValidationError(\"The token has expired.\")\n except JWTError:\n raise serializers.ValidationError(\"Error validating token. Ensure is the right token.\")\n\n self.context['payload'] = payload\n return data", "def decode_token_appengine(credentials, token, verify=False):\n return _decode_token(credentials, token, False)", "def decode_token(self, token: str, max_age: int) -> Optional[object]:\n try:\n return self.serializer.loads(token, max_age)\n except (BadSignature, SignatureExpired) as e:\n return None", "def verify_token(self, token: str) -> str:\n return decode(self.rd.hget(\"auth:by_token\", token))", "def token(uncapped_token):\n return uncapped_token", "def _parse_security_token(token):\n if not token:\n return None\n if ':' not in token:\n logging.warn('Malformed token: no signature separator')\n return None\n sig, body = token.split(':', 1)\n if _DISABLE_CRYPTO:\n plaintext = body\n else:\n key_storage = KeyStorage.get()\n hmac_key = key_storage.hmac_key\n if type(hmac_key) == unicode:\n # Crypto requires byte strings\n hmac_key = hmac_key.encode('utf8')\n computed_sig = HMAC.HMAC(key=hmac_key,\n msg=body).hexdigest()\n if sig != computed_sig:\n logging.warn('Malformed token: invalid signature')\n return None\n try:\n plaintext = AES.new(key_storage.aes_key,\n AES.MODE_CBC).decrypt(body)\n except ValueError:\n logging.warn('Malformed token: wrong size')\n return None\n # Remove excess whitespace.\n plaintext = plaintext.strip()\n # The plaintext should contain at least one space.\n if ' ' not in plaintext:\n logging.warn('Malformed token: bad contents')\n return None\n parts = plaintext.split(' ')\n if len(parts) != 2:\n logging.warn('Malformed token: bad structure')\n return None\n timestamp, email = parts\n try:\n timestamp = int(timestamp, 16)\n except ValueError:\n logging.warn('Malformed token: bad timestamp')\n return None\n # Reject tokens that are too old or which have time-traveled. 
We\n # allow for 1s of clock skew.\n age_s = time.time() - timestamp\n if age_s < -1 or age_s > _TOKEN_TIMEOUT_S:\n logging.warn('Malformed token: expired (age=%ds)', age_s)\n return None\n cred = _Credentials()\n cred.email = email\n cred.security_token_is_stale = (age_s > 0.5 * _TOKEN_TIMEOUT_S)\n return cred", "def check_token(token: str, secret: str | List[str], max_age_seconds: int = 60 * 60 * 24) -> Any:\n return URLSafeTimedSerializer(secret).loads(token, max_age=max_age_seconds, salt=\"token\")", "def _decode(token):\n if token is None:\n return None\n # Pad the token out to be divisible by 4.\n padded_token = bytes(token, 'utf8') + '='.encode() * (4 - (len(token) % 4))\n decoded_token = base64.urlsafe_b64decode(padded_token)\n token_dict = json.loads(decoded_token)\n if not token_dict or not isinstance(token_dict, dict):\n raise ValueError('Invalid pagination token: {}').format(token_dict)\n return token_dict", "def test_validate_token_returns_false_for_invalid_token(self, demo_app):\n demo_app.config.get.return_value = self.jwt_key\n token = jwt.encode({}, self.jwt_key_2, algorithm='HS256')\n\n self.assertFalse(\n validate_token(token)[0],\n 'Failed to recognise invalidate token.'\n )", "def isValid(token):\n try:\n decoded = jwt.decode(token, SECRET_KEY)\n return True\n except:\n return False", "def token(uncapped_token: Contract):\n return uncapped_token", "def validate_token():\n try:\n token = validate_auth()\n except Unauthorized:\n return jsonify(valid=False, expires_in=0)\n expires = oidc.user_getfield('exp')\n delta = expires - datetime.now().timestamp()\n return jsonify(valid=True, expires_in=delta)", "def test_decode_token(token):\n payload = User.decode_auth_token(token)\n user = User.find_by_id(payload.get('id'))\n assert isinstance(user, User) is True\n assert user.email == 'adminuser@test.com'", "def test_decode_IQ_token(self):\n\n token = 
\"\"\"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJuYW1laWQiOiJhZGZzfHNodzAwMXNhaW50ZWxzZXdoZXJlfGpwX2FkbWluQHVybjphdXRoMDpzYWludGVsc2V3aGVyZSIsImVtYWlsIjoiSmFpbWluLlBhdGVsODMrNTE2NDU2QGdtYWlsLmNvbSIsInVuaXF1ZV9uYW1lIjoiSVFHRU5IT1NQXFxiXy1kcHl4eDBFeVVjR0pIaG1aOCIsImh0dHBzOi8vdGVsZXRyYWNraW5nLmNsb3VkYXBwLm5ldC9pZGVudGl0eS9jbGFpbXMvYWR1c2VyZ3VpZCI6IjMveFFhZ0VrSWttcllBU0VQZHVZRmc9PSIsImh0dHBzOi8vdGVsZXRyYWNraW5nLmNsb3VkYXBwLm5ldC9pZGVudGl0eS9jbGFpbXMvZmlyc3RuYW1lIjoiQWRtaW4iLCJodHRwczovL3RlbGV0cmFja2luZy5jbG91ZGFwcC5uZXQvaWRlbnRpdHkvY2xhaW1zL2xhc3RuYW1lIjoiVGVzdCIsImh0dHBzOi8vdGVsZXRyYWNraW5nLmNsb3VkYXBwLm5ldC9pZGVudGl0eS9jbGFpbXMvb3VuYW1lIjoiU2FpbnRFbHNld2hlcmUiLCJyb2xlIjpbIkRvbWFpbiBVc2VycyIsIkFkbWluaXN0cmF0b3IiLCJJUUdlbkhvc3BTZWMiLCJTYWludEVsc2V3aGVyZSJdLCJ1cG4iOiJKYWltaW4uUGF0ZWw4Mys1MTY0NTZAZ21haWwuY29tIiwiaHR0cDovL3NjaGVtYXMuYXV0aDAuY29tL2lkZW50aXRpZXMvZGVmYXVsdC9wcm92aWRlciI6ImFkZnMiLCJodHRwOi8vc2NoZW1hcy5hdXRoMC5jb20vaWRlbnRpdGllcy9kZWZhdWx0L2Nvbm5lY3Rpb24iOiJzaHcwMDFzYWludGVsc2V3aGVyZSIsImh0dHA6Ly9zY2hlbWFzLmF1dGgwLmNvbS9pZGVudGl0aWVzL2RlZmF1bHQvaXNTb2NpYWwiOiJmYWxzZSIsImh0dHA6Ly9zY2hlbWFzLmF1dGgwLmNvbS9naXZlbl9uYW1lIjoiSVFHRU5IT1NQXFxiXy1kcHl4eDBFeVVjR0pIaG1aOCIsImh0dHA6Ly9zY2hlbWFzLmF1dGgwLmNvbS9waWN0dXJlIjoiaHR0cHM6Ly9zLmdyYXZhdGFyLmNvbS9hdmF0YXIvMzUxYTRiMjU4NWViM2UyYjA1NWI4ZTAyOGY4NzdmNDc_cz00ODBcdTAwMjZyPXBnXHUwMDI2ZD1odHRwcyUzQSUyRiUyRmNkbi5hdXRoMC5jb20lMkZhdmF0YXJzJTJGaXEucG5nIiwiaHR0cDovL3NjaGVtYXMuYXV0aDAuY29tL25pY2tuYW1lIjoiSmFpbWluLlBhdGVsODMrNTE2NDU2IiwiaHR0cDovL3NjaGVtYXMuYXV0aDAuY29tL2VtYWlsX3ZlcmlmaWVkIjoidHJ1ZSIsImh0dHA6Ly9zY2hlbWFzLmF1dGgwLmNvbS9jbGllbnRJRCI6Imtrakgxd3AzdE53RmpEN0M1djI3a0oyWHFWUHE1akhtIiwiaHR0cDovL3NjaGVtYXMuYXV0aDAuY29tL3VwZGF0ZWRfYXQiOiJNb24gSmFuIDE0IDIwMTkgMTU6NTY6MTIgR01UKzAwMDAgKFVUQykiLCJodHRwOi8vc2NoZW1hcy5hdXRoMC5jb20vY3JlYXRlZF9hdCI6IkZyaSBKYW4gMTEgMjAxOSAyMDoxNToyMiBHTVQrMDAwMCAoVVRDKSIsImF1dGhtZXRob2QiOiJodHRwOi8vc2NoZW1hcy5taWNyb3NvZnQuY29tL3dzLzIwMDgvMDYvaWRlbnRpdHkvYXV0aGVudGljYXRpb25tZXRob2QvcGFzc3dvcmQiLCJhdXRoX3RpbWUiOiIyMDE5LTAxLTE0VDIzOjU2OjEyLjg1M1oiLCJodHRwczovL3RlbGV0cmFja2luZy5jbG91ZGFwcC5uZXQvaWRlbnRpdHkvY2xhaW1zL3RlbmFudGlkIjoiMjExNmU5NDMtNTA5NC00MWY3LTgzMTgtODNhYWMyYWMxMTQ3IiwiaHR0cHM6Ly90ZWxldHJhY2tpbmcuY2xvdWRhcHAubmV0L2lkZW50aXR5L2NsYWltcy9jb250ZXh0cGVyc29uaWQiOiIwYTAxMjBhMS04NTU3LTQ4MzEtYTQyNi1hOGJkMDBmNjFkYzkiLCJodHRwczovL3RlbGV0cmFja2luZy5jbG91ZGFwcC5uZXQvaWRlbnRpdHkvY2xhaW1zL3VzZXJuYW1lZm9ybWFsIjoiVGVzdCwgQWRtaW4iLCJodHRwczovL3RlbGV0cmFja2luZy5jbG91ZGFwcC5uZXQvaWRlbnRpdHkvY2xhaW1zL3VzZXJuYW1laW5mb3JtYWwiOiJBZG1pbiBUZXN0IiwiaHR0cHM6Ly90ZWxldHJhY2tpbmcuY2xvdWRhcHAubmV0L2lkZW50aXR5L2NsYWltcy91c2VySWQiOiI0ZmU5OTdmZC00ZGNkLTQxNWItYjJjYi1hOGJkMDBmNjFkYzkiLCJodHRwczovL3RlbGV0cmFja2luZy5jbG91ZGFwcC5uZXQvaWRlbnRpdHkvY2xhaW1zL2ZlYXR1cmV0eXBlaWQiOlsiNCIsIjIiLCIxIiwiMyIsIjUiLCI2Il0sImlzcyI6InRlbGV0cmFja2luZy5jb20iLCJhdWQiOiJodHRwOi8vd3d3LnNlcnZpY2UudGVsZXRyYWNraW5nLmNvbS8iLCJleHAiOjE1NTAwNzM0MzksIm5iZiI6MTU0NzQ4MTQzOX0.UCL-Wc3OSVDI58U5ShOYqLa-DwNc_WQ3BlY5P3CfnVI\"\"\"\n audience = 'http://www.service.teletracking.com/'\n\n secret = 'drMemxWrLen6fCXQA5jO6gXkK/UoZVzPGRDiff7ByPU='\n decoded_token = AuthenticationHandler.validate_and_decode_token(\n token=token, key=secret,\n audience=audience\n )\n self.assertTrue(decoded_token['role'][0] == 'Domain Users', \"Group 1 not match\")\n self.assertTrue(decoded_token['role'][1] == 'Administrator', \"Group 2 not match\")", "def validate(cls, token):\n if not cls.JWT_REGEX.match(token):\n raise ValueError('Invalid JWT token')\n\n return token", "def test_unused_token_is_valid(self):\n assert 
self.token.is_valid()", "def parse_token(req):\n auth_string_list = req.headers.get('Authorization').split()\n # Check in correct format i.e. Bearer: 39xds03lda0...\n if len(auth_string_list) == 1:\n raise ValueError('Authorization has invalid format')\n else:\n token = auth_string_list[1]\n data = jwt.decode(token, config.SECRET_KEY, algorithms='HS256')\n return data", "def check_for_token(token):\n try:\n decode_token(token)\n return True\n except:\n return False", "def verify_jwt(token):\n return jwt.decode(token.encode(), SECRET_KEY)", "def test_rejects_invalid_tokens(self):\n config.set(xsrf_token_key='abcdef')\n tool = utils.XsrfTool()\n self.assertFalse(tool.verify_token(\n 'ThisTokenDoesNotEvenHaveASlash', 12345, 'test_action'))\n timestamp = utils.get_timestamp(XsrfToolTests.TEST_NOW)\n self.assertFalse(\n tool.verify_token('NotTheRightDigest/%f' % timestamp, 12345,\n 'test_action'))", "def load_token(token):\n \n #The Token itself was generated by User.get_auth_token. So it is up to \n #us to known the format of the token data itself. \n \n #The Token was encrypted using itsdangerous.URLSafeTimedSerializer which \n #allows us to have a max_age on the token itself. When the cookie is stored\n #on the users computer it also has a exipry date, but could be changed by\n #the user, so this feature allows us to enforce the exipry date of the token\n #server side and not rely on the users cookie to exipre. \n max_age = REMEMBER_COOKIE_DURATION.total_seconds()\n \n #Decrypt the Security Token, data = [username, hashpass]\n data = login_serializer.loads(token, max_age=max_age)\n \n #Find the User\n user = load_user(data[0])\n \n #Check Password and return user or None\n if user and data[1] == user.password:\n return user\n return None", "def verify_auth_token(token):\n serializer = Serializer(SECRET_KEY)\n try:\n data = serializer.loads(token)\n except SignatureExpired:\n return None # valid token, but expired\n except BadSignature:\n return None # invalid token\n return data['token']", "def get_token(alias, reg_code, privKey):\n data = json.dumps({\n \"namespace\": alias,\n \"reg_code\": reg_code\n })\n url = endpoint('auth')\n r = requests.post(url,data=data) \n token_str = (r.__dict__['_content']).decode()\n r_token_obj = json.loads(token_str)\n token_cipher = ast.literal_eval( r_token_obj[\"token\"] )\n token_obj = dict()\n token_obj = {\n \"authToken\": decrypt_message( privKey, token_cipher),\n \"expiration_minutes\": r_token_obj[\"expiration_minutes\"],\n \"expiration\": str(datetime.datetime.now() + datetime.timedelta(minutes=r_token_obj[\"expiration_minutes\"]))\n }\n expiration = token_obj[\"expiration\"]\n expiration = parser.parse(expiration)\n if datetime.datetime.now() > expiration:\n print(\"Token has expired\")\n else:\n c = expiration - datetime.datetime.now()\n valid_minutes = str(divmod(c.total_seconds(), 60)[0])\n return token_obj[\"authToken\"]", "def check_token(self, token):\n decoded_token = manage_tokens.decode(token)\n if decoded_token is None:\n return {'error': 'Token is invalid'}\n\n if 'email' not in decoded_token or 'expires' not in decoded_token \\\n or 'token' not in decoded_token:\n return {'error': 'Token is invalid'}\n\n self.email = decoded_token['email']\n self.user_in_db = User.users_db.get(decoded_token['email'])\n\n if not self.user_in_db:\n # User does not exist\n return {'error': 'User does not exist'}\n\n if self.user_in_db['token'] != decoded_token['token']:\n return {'error': 'Token is invalid'}\n\n if decoded_token['expires'] < time.time():\n 
return {'error': 'Token is expired'}\n\n return decoded_token", "def decode(encoded):\n if encoded is None:\n return None\n\n try:\n s = decode(APP.config['SECRET_KEY'], encoded)\n return json.loads(s)\n except Exception as err:\n LOGGER.error('Error decoding auth: %s' % str(err))\n raise err", "def decode_token(token):\n text = xlmr.decode(torch.tensor(token).long())\n return text.replace(' ', '')", "def parse_jwt_guest_token(self, raw_token: str) -> Dict[str, Any]:\n secret = current_app.config[\"GUEST_TOKEN_JWT_SECRET\"]\n algo = current_app.config[\"GUEST_TOKEN_JWT_ALGO\"]\n audience = self._get_guest_token_jwt_audience()\n return self.pyjwt_for_guest_token.decode(\n raw_token, secret, algorithms=[algo], audience=audience\n )", "def get_payload(cls, token):\n \n secret = cls.secret\n algo = cls.algo\n decoded = jwt.decode(token, secret, algo)\n return decoded", "def decode_expiry_value(byte_iter):\n value_length = MMSDecoder.decode_value_length(byte_iter)\n token = byte_iter.next()\n\n if token == 0x80: # Absolute-token\n return MMSDecoder.decode_date_value(byte_iter)\n elif token == 0x81: # Relative-token\n return MMSDecoder.decode_delta_seconds_value(byte_iter)\n\n raise wsp_pdu.DecodeError('Unrecognized token value: %s' % hex(token))", "def validate_token(token):\n # first, decode the token data to determine the tenant associated with the token. We are not able to\n # check the signature until we know which tenant, and thus, which public key, to use for validation.\n try:\n data = jwt.decode(token, verify=False)\n except Exception as e:\n logger.debug(f\"got exception trying to parse data from the access_token jwt; exception: {e}\")\n raise errors.AuthenticationError(\"could not parse the access token.\")\n # get the tenant out of the jwt payload and get associated public key\n token_tenant_id = data['tenant_id']\n try:\n public_key_str = get_tenant_config(token_tenant_id)['public_key']\n except errors.BaseTapisError:\n raise errors.AuthenticationError(\"Unable to process Tapis token; unexpected tenant_id.\")\n except KeyError:\n raise errors.AuthenticationError(\"Unable to process Tapis token; no public key associated with the \"\n \"tenant_id.\")\n # try:\n # pub_key = get_pub_rsa_key(public_key_str)\n # except Exception as e:\n # logger.error(f\"got exception trying to create public RSA key object; e: {e} \")\n # raise errors.ServiceConfigError(\"Unable to process public key associated with tenant.\")\n try:\n return jwt.decode(token, public_key_str, algorithm='RS256')\n except Exception as e:\n logger.debug(f\"Got exception trying to decode token; exception: {e}\")\n raise errors.AuthenticationError(\"Invalid Tapis token.\")", "def test_generate_and_validate_token(self):\n\n audience = 'http://www.service.teletracking.com/'\n roles = {'role': ['admin', 'user'], 'audience': audience}\n secret = 'drMemxWrLen6fCXQA5jO6gXkK/UoZVzPGRDiff7ByPU='\n token = AuthenticationHandler.generate_auth_token(roles, secret)\n decoded_token = AuthenticationHandler.validate_and_decode_token(\n token=token, key=secret,\n audience=audience\n )\n self.assertTrue(decoded_token['role'][0] == 'admin')\n self.assertTrue(decoded_token['role'][1] == 'user')", "def test_garbage_token(self):\n token = 'ffnnsdifsdjofjfosdjfodsjfosdjofj'\n result = self._token_checker.valid_token_to_id(token)\n self.assertEqual(result, None)", "def decode_payload(encoded_payload):\n jwt_secret = app.config['SECRET_KEY']\n payload = jwt.decode(encoded_payload, jwt_secret, algorithms='HS256')\n\n return payload", "def 
decode_jwt(encoded, key, algorithms = 'HS256'):\n try:\n payload = jwt.decode(\n encoded, \n key, \n algorithms = algorithms\n )\n\n return payload\n # if token has expired:\n except jwt.exceptions.ExpiredSignatureError:\n raise JWTError(\n {\n 'code': 'token_expired',\n 'description': 'Token expired.'\n }, \n 401\n )\n # other exceptions:\n except Exception:\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Unable to parse authentication token.'\n }, \n 400\n )", "def validate_token(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n \"\"\"Tries to decode the JWT token using the SECRET KEY.\n\n Executes the original function if token is valid.\n\n Otherwise returns HTTP 401 to the Client.\n\n \"\"\"\n token = request.headers.get('token')\n\n try:\n jwt.decode(token, app.config['SECRET_KEY'])\n return func(*args, **kwargs)\n except jwt.DecodeError:\n message = 'Token is missing / invalid'\n except jwt.exceptions.ExpiredSignatureError:\n message = 'Token has expired'\n\n\n return Response(\n json.dumps({'error': message}),\n 401,\n mimetype='application/json'\n )\n\n return wrapper", "def verify_token(self, token):\n return False", "def token_auth_error():\n logger.debug(\"Token authentication failed.\")\n return unauthorized(\"Invalid credentials.\")", "def update_token(token):\n try:\n payload = jwt.decode(token, os.environ.get('SECRET', 'test'))\n payload['exp'] = datetime.utcnow() + timedelta(days=100)\n jwt_bytes = jwt.encode(\n payload,\n os.environ.get('SECRET', 'test'),\n algorithm='HS256'\n )\n return jwt_bytes.decode('utf-8')\n except Exception as e:\n raise Exception(str(e))", "def deserialize_tokens():\n\ttry:\n\t\twith open(config.TOKENPATH, \"r+\") as f:\n\t\t\tcontext = f.read()\n\t\t\tres = eval(context)\n\t\t\t# load into memory\n\t\t\treturn res[\"access_token\"], res[\"refresh_token\"]\n\texcept:\n\t\t# unexcept token format\n\t\tfrom common import ApplicationException\n\t\traise ApplicationException(\"authorization file is broken, please run init\")", "def decode(\n self,\n keys: Union[KeyInterface, List[KeyInterface]],\n token: Union[bytes, str],\n implicit_assertion: Union[bytes, str] = b\"\",\n deserializer: Optional[Any] = None,\n aud: str = \"\",\n ) -> Token:\n\n if deserializer:\n try:\n if not callable(deserializer.loads):\n raise ValueError(\"deserializer should have loads().\")\n except AttributeError:\n raise ValueError(\"deserializer should have loads().\")\n except Exception:\n raise\n\n keys = keys if isinstance(keys, list) else [keys]\n bi = implicit_assertion if isinstance(implicit_assertion, bytes) else implicit_assertion.encode(\"utf-8\")\n\n failed = None\n t = Token.new(token)\n for k in keys:\n if k.header != t.header:\n continue\n try:\n if k.purpose == \"local\":\n t.payload = k.decrypt(t.payload, t.footer, bi)\n else:\n t.payload = k.verify(t.payload, t.footer, bi)\n try:\n if deserializer:\n t.payload = deserializer.loads(t.payload)\n except Exception as err:\n raise ValueError(\"Failed to deserialize the payload.\") from err\n if deserializer:\n try:\n if t.footer:\n t.footer = deserializer.loads(t.footer)\n except Exception:\n pass\n self._verify_registered_claims(t.payload, aud)\n return t\n except Exception as err:\n failed = err\n if failed:\n raise failed\n raise ValueError(\"key is not found for verifying the token.\")", "def load_token(token):\n\n #The Token itself was generated by User.get_auth_token. So it is up to \n #us to known the format of the token data itself. 
\n\n #The Token was encrypted using itsdangerous.URLSafeTimedSerializer which \n #allows us to have a max_age on the token itself. When the cookie is stored\n #on the users computer it also has a exipry date, but could be changed by\n #the user, so this feature allows us to enforce the exipry date of the token\n #server side and not rely on the users cookie to exipre. \n #max_age = app.config[\"REMEMBER_COOKIE_DURATION\"].total_seconds()\n\n #Decrypt the Security Token, data = [username, hashpass]\n data = login_serializer.loads(token)\n\n #Find the User\n user = User.query.filter_by(email = data[0]).first()\n\n #Check Password and return user or None\n if user and data[1] == user.pwd:\n \t#On update la derniere connection du user\n \tuser.update_last_connection()\n return user\n return None\n\n\n\n\n\n\n\n #######################################################\n ################# REQUETES ############################\n #######################################################", "def test_token(self):\r\n expected = \"eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3N1ZWRBdCI6ICIyMDE0LTAyLTI3VDE3OjAwOjQyLjQwNjQ0MSswOjAwIiwgImNvbnN1bWVyS2V5IjogImZha2Vfc2VjcmV0IiwgInVzZXJJZCI6ICJ1c2VybmFtZSIsICJ0dGwiOiA4NjQwMH0.Dx1PoF-7mqBOOSGDMZ9R_s3oaaLRPnn6CJgGGF2A5CQ\"\r\n response = retrieve_token(\"username\", \"fake_secret\")\r\n\r\n # because the middle hashes are dependent on time, conly the header and footer are checked for secret key\r\n self.assertEqual(expected.split('.')[0], response.split('.')[0])\r\n self.assertNotEqual(expected.split('.')[2], response.split('.')[2])", "def validate_token(self):\n r = requests.get(urljoin(self._url, Client._token_resource),\n params={\"tokenid\": self._token_id})\n\n if r.status_code == requests.status_codes.codes.unauthorized:\n raise ClientUnauthorized()\n elif r.status_code != requests.status_codes.codes.ok:\n error_messages = self._parse_invalid_request(r.text)\n raise ClientException(r.status_code, error_messages)\n\n try:\n type_, value = r.text.split(\"=\")\n value = value.strip(\" \\r\\n\")\n except Exception, e:\n raise ClientException(r.status_code,\n \"Some error has ocurred getting the result value from %s\"\n % r.text)\n\n return value == \"true\"", "def validate_token(self, token):\n try:\n self._verification = models.EmailVerification.objects.get(\n token=token,\n )\n except models.EmailVerification.DoesNotExist:\n raise serializers.ValidationError(\n code='invalid_token',\n detail=_('The provided token does not exist or has expired.'),\n )\n\n return token", "def _upgrade_token(self, http_body):\n self.token_string = auth_sub_string_from_body(http_body)", "def testTokenToDataWithBadKey(self):\n key = createKey()\n data = {u'user': u'aliafshar'}\n token = dataToToken(key, data)\n self.assertRaises(ValueError, tokenToData, createKey(), token=token)", "def UserToken(self) -> object:", "async def decode(self, jwt_token: str, verify=True) -> dict:\n try:\n jwt_token = jwt_token.replace(f\"{self.auth_schema} \", \"\")\n payload = jwt.decode(\n jwt_token,\n self.jwt_secret,\n algorithms=(self.jwt_algorithm,),\n options={\"verify_exp\": verify},\n )\n\n return payload\n\n except jwt.DecodeError:\n raise InvalidTokenException()\n\n except jwt.ExpiredSignatureError:\n raise TokenExpiredException()", "def _validate_jwt_token(self):\n # force https so that we don't send around tokens unsecurely\n url = 'https://{}/api/token/verify'.format(urlparse(self.base_url).netloc)\n \n # paranoid: check again that we only send the token to https\n if 
urlparse(url).scheme != \"https\":\n msg = 'This should not happen, please file a bug report.'\n raise Exception(msg)\n\n if not self.jwt_access_token:\n raise FDSNUnauthorizedException(\"Unauthorized, authentication \"\n \"required.\", )\n\n # convert to json\n data = json.dumps({\"token\": self.jwt_access_token})\n # encode\n data = bytes(data, \"utf-8\")\n headers = {\"Content-Type\": \"application/json\"}\n html = urllib_request.Request(url, data=data, headers=headers)\n # decode('utf-8')\n try:\n result = urllib_request.urlopen(html).read().decode(\"utf-8\")\n dic = json.loads(result)\n valid = not bool(dic)\n if self.debug:\n print('Valid token : {}'.format(valid))\n return valid\n except urllib_error.HTTPError as e:\n return False", "def validate_token(self, token):\n\n try:\n if not token:\n raise AuthException(\"Needed a token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n\n # try to get from cache first\n now = time()\n token_info = self.token_cache.get(token)\n if token_info and token_info[\"expires\"] < now:\n # delete token. MUST be done with care, as another thread maybe already delete it. Do not use del\n self.token_cache.pop(token, None)\n token_info = None\n\n # get from database if not in cache\n if not token_info:\n token_info = self.db.get_one(\"tokens\", {\"_id\": token})\n if token_info[\"expires\"] < now:\n raise AuthException(\"Expired Token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n\n return token_info\n\n except DbException as e:\n if e.http_code == HTTPStatus.NOT_FOUND:\n raise AuthException(\"Invalid Token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n else:\n raise\n except AuthException:\n if self.config[\"global\"].get(\"test.user_not_authorized\"):\n return {\"id\": \"fake-token-id-for-test\",\n \"project_id\": self.config[\"global\"].get(\"test.project_not_authorized\", \"admin\"),\n \"username\": self.config[\"global\"][\"test.user_not_authorized\"], \"admin\": True}\n else:\n raise\n except Exception:\n self.logger.exception(\"Error during token validation using internal backend\")\n raise AuthException(\"Error during token validation using internal backend\",\n http_code=HTTPStatus.UNAUTHORIZED)", "def token_to_id(self, token):\r\n return self.encoder.get(token, self.encoder.get(self.unk_token))", "def parse_insecure(cls, token: str, expected_audience: List[str]) -> 'JwtSvid':\n if not token:\n raise ArgumentError(INVALID_INPUT_ERROR.format('token cannot be empty'))\n try:\n header_params = jwt.get_unverified_header(token)\n validator = JwtSvidValidator()\n validator.validate_header(header_params)\n claims = jwt.decode(token, options={'verify_signature': False})\n validator.validate_claims(claims, expected_audience)\n spiffe_id = SpiffeId.parse(claims['sub'])\n return JwtSvid(spiffe_id, claims['aud'], claims['exp'], claims, token)\n except PyJWTError as err:\n raise InvalidTokenError(str(err))", "def deparse(token):\n\n pass", "def token(cls, token):\n user_db = User.get_by('token', token)\n if not user_db:\n raise ValueError('Sorry, your token is either invalid or expired.')\n return token", "def test_validate_token(self, demo_app):\n demo_app.config.get.return_value = self.jwt_key\n token = jwt.encode({}, self.jwt_key, algorithm='HS256')\n\n self.assertTrue(\n validate_token(token)[0],\n 'Failed to validate token.'\n )", "def verifyToken():\n if request:\n data = json.dumps(request.json)\n reqToken = json.loads(data)[\"token\"]\n if len(reqToken) >= 8 and len(reqToken) <= 32:\n found 
= Token.query.filter(Token.token == f'{reqToken}').first()\n print(found)\n if found:\n message = \"Success! It's an older code, sir, but it checks out.\" # noqa\n else:\n message = \"Code not found.\"\n else:\n message = 'Invalid token length.'\n else:\n message = 'Invalid JSON request'\n return jsonify(status=message)", "def _generate_token_value():\n return secrets.token_urlsafe()", "def _parse_token(self, body):\n\n token_match = re.search('var\\s*token\\s*=[\\s\\']*(\\d+)', body)\n return int(token_match.group(1))", "def check_token(fn):\n def response(self, *args, **kw):\n if not JWT_DISABLED:\n intoken = get_token_from_header()\n try:\n jwt.decode(intoken, SECRET_KEY)\n except jwt.exceptions.DecodeError:\n raise Error(FORBIDDEN)\n except jwt.ExpiredSignatureError:\n raise Error(UNAUTHORIZED, msg=\"Signature expired.\")\n except jwt.InvalidTokenError:\n raise Error(UNAUTHORIZED, msg=\"Invalid token.\")\n return fn(self, *args, **kw)\n return response", "def decode(self, token, verify=True):\n try:\n return jwt.decode(\n token,\n self.get_verifying_key(token),\n algorithms=[self.algorithm],\n audience=self.audience,\n issuer=self.issuer,\n leeway=self.leeway,\n options={\n 'verify_aud': self.audience is not None,\n 'verify_signature': verify,\n },\n )\n except InvalidAlgorithmError as ex:\n raise TokenBackendError(_('Invalid algorithm specified')) from ex\n except InvalidTokenError:\n raise TokenBackendError(_('Token is invalid or expired'))", "def get_token(self):\n if not self.is_valid():\n logger.warn(\"TokenWall form data is not valid.\")\n return None\n \n tt = self.cleaned_data['token']\n logger.debug(\"Looking for token '%s'\"%tt)\n return Token.objects.get(value=tt)", "def token(self):\n return self[\"token\"]", "def _handle_token(self, token: str) -> Optional[str]:\n return token", "def get_token(self, token_id):\n raise exception.NotImplemented() # pragma: no cover", "def test_live_thread_token_is_valid(self):\n assert self.token.is_valid()", "def try_get_user_id_from_token(token):\n dot_index = token.find('.')\n if (dot_index > 0):\n token_base64 = token[:dot_index]\n \n try:\n token_string = b64decode(token_base64)\n except Base64DecodeError:\n user_id = 0\n else:\n try:\n user_id = int(token_string)\n except ValueError:\n user_id = 0\n else:\n user_id = 0\n \n return user_id", "def validate_token(user, tkn):\n try:\n decoded = jwt.decode(tkn, KEY)\n if decoded['user'] == user:\n stored_token = User.get(User.username == user).token\n if stored_token == tkn:\n return True\n return False\n except jwt.ExpiredSignatureError:\n return HTTPResponse(status=400, body={\"msg\":\"Validation error.\"})", "def decrypt(self, encrypted_token: bytes) -> bytes:\n return None", "def __get_token_data__(self):\n raise Exception(\"Implement me!\")" ]
[ "0.7511837", "0.74077404", "0.7329391", "0.7277755", "0.7243613", "0.72420645", "0.7237718", "0.721192", "0.7203428", "0.71465415", "0.71348214", "0.7133082", "0.7113959", "0.7102475", "0.69896144", "0.69554555", "0.6920015", "0.6862283", "0.68387103", "0.6809911", "0.6779272", "0.6776057", "0.67450076", "0.67388326", "0.66883916", "0.6684462", "0.66844326", "0.66766196", "0.66389626", "0.6586005", "0.65354645", "0.65225756", "0.65033656", "0.64516556", "0.636763", "0.6348911", "0.6328207", "0.6326395", "0.6320845", "0.62185454", "0.6189134", "0.61446023", "0.61303157", "0.61279887", "0.6114751", "0.60788465", "0.60724205", "0.60672265", "0.6051413", "0.6050062", "0.6039001", "0.5995329", "0.5970688", "0.5942932", "0.5940127", "0.5920564", "0.59179455", "0.59148365", "0.58964723", "0.58689094", "0.58650184", "0.5864547", "0.586064", "0.58525807", "0.583706", "0.5833181", "0.5826611", "0.5809213", "0.5806094", "0.57961196", "0.57954127", "0.57871884", "0.5768604", "0.5767196", "0.5763516", "0.574288", "0.57373255", "0.57348764", "0.5733123", "0.57198614", "0.57137287", "0.5696602", "0.56950384", "0.5694793", "0.5687075", "0.5686478", "0.567692", "0.56572956", "0.5657006", "0.56564134", "0.56355536", "0.5619015", "0.5613464", "0.5607918", "0.56041944", "0.5603158", "0.560001", "0.5595203", "0.5588218", "0.55861104" ]
0.73733056
2
Compute the output value of the neuron
def compute_output(self, input_data, no_update_wsi=False):
    # compute weighted sum of inputs
    if not no_update_wsi:
        self.compute_wsi(input_data)
    # compute output based on initialization
    if self.activation_type == 'step':
        self.output = Neuron.step_function(self.wsi)
    elif self.activation_type == 'sigmoidal':
        self.output = Neuron.sigmoidal_function(self.wsi, self.af_param)
    elif self.activation_type == 'hyperbolic':
        self.output = Neuron.hyperbolic_function(self.wsi)
    elif self.activation_type == 'gaussian':
        self.output = Neuron.gaussian_function(self.wsi)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_output(self):\n x, y = self.input_nodes\n self.output_value = backend.multiply(x.output_value, y.output_value)\n return self.output_value", "def compute_output(self):\n x, y = self.input_nodes\n self.output_value = backend.add(x.output_value, y.output_value)\n return self.output_value", "def compute_output(self):\n x, y = self.input_nodes\n print(x.name, y.name)\n self.output_value = backend.dot(x.output_value, y.output_value)\n return self.output_value", "def output(self):\n # print \"Neuron output\"\n\n if self.output_cache is not None:\n # print self, \"returning from cache\"\n return self.output_cache\n\n self.inputs_cache = []\n\n sum = 0\n for input_edge in self.inputs:\n input = input_edge.from_.output()\n self.inputs_cache.append(input)\n sum += input * input_edge.w\n\n self.output_cache = sigmoid(sum)\n # print \"node output:\", self.output_cache, sum\n return self.output_cache", "def output (self) -> np.ndarray :\n return RNN.sigmoid (self.Wout.dot (self.xvec))", "def get_output(self, X):\n return X.dot(self.W) + self.b", "def compute_output_from_current_state(self):\n\n assert self.Wout is not None, \"Matrix Wout is not initialized/trained yet\"\n\n self.output_values = (self.Wout @ self.state).astype(self.typefloat)\n return self.output_values.copy().ravel()", "def neural_result(self, input):\n n_output = self.network.activate(input)\n if n_output >= 0.5:\n return 2\n else:\n return 1", "def Get_Output(self, hidden_state):\n output = tf.nn.relu(tf.matmul(hidden_state, self.Wo) + self.bo)\n\n return output", "def engage(self):\n # no sigmoid for the inputs and bias\n if layer != 0:\n self.outputValue = sigmoid(inputSum);\n\n for connection in self.outputConnections:\n if connection.enabled == True:\n #connection will have toNode\n connection.toNode.inputSum += connection.weight * self.outputValue;", "def calculateNeuronsOutputs(weights, inputs):\n y = 0\n for i in range(len(weights)):\n y = y + weights[i] * inputs[i]\n return y", "def calculate_output(self):", "def get_hidden_values(self, input):\r\n return T.nnet.sigmoid(T.dot(input, self.W) + self.b)", "def get_hidden_values(self, input):\r\n return T.nnet.sigmoid(T.dot(input, self.W) + self.b)", "def get_output(self, X):\n return softmax(X)", "def calculate_output(self, input_par):\r\n\r\n return self.meta_model.calculate_output(input_par)", "def get_output(self):\r\n x = self.query('OUTP?')\r\n if x == None: return None\r\n return int(x)", "def get_hidden_values(self, input):\n return T.nnet.sigmoid(T.dot(input, self.W) + self.b)", "def get_hidden_values(self, input):\n return T.nnet.sigmoid(T.dot(input, self.W) + self.b)", "def get_hidden_values(self, input):\n return T.nnet.sigmoid(T.dot(input, self.W) + self.bh)", "def val(self):\n return self.output", "def val(self):\n return self.output", "def get_outputs():\n all_hidden_states = get_states()\n all_attention = tf.map_fn(get_attention, all_hidden_states)\n a_values = tf.nn.softmax(all_attention, axis = 0)\n final_hidden_state = tf.einsum('ijk,ijl->jkl', a_values, \n all_hidden_states)\n output = tf.nn.sigmoid(tf.matmul(final_hidden_state[:,0,:], Wo) + bo, \n name='outputs')\n return output, a_values", "def evaluate(self, inp):\n x = torch.unsqueeze(inp, 0)\n out = self.forward(x)\n return out.max(1)[1].item()", "def out(input_lst, weight_lst, bias):\r\n return 1 / (1 + math.exp(-1 * net(input_lst, weight_lst, bias)))", "def evaluate(self, input):\n\t\tinput = np.array([-1] + list(input))\n\n\t\tfor layer in self.layers:\n\t\t\tinput = np.array([-1]+[ 
neuron.function(np.dot(neuron.weights, input)) for neuron in layer ])\n\n\t\treturn input[1:]", "def get_output(self, **kwargs):\n with tf.variable_scope(self.layer_scope):\n return self.out", "def sigmoid2predictions(output: torch.Tensor) -> torch.Tensor:\n return (torch.sign(output - 0.5) + 1) / 2", "def pred_from_net_output(self, net_output: ALL_NET_OUTPUT) -> torch.Tensor:\n raise NotImplementedError", "def Evaluate(self, input_data: np.ndarray) -> np.ndarray:\n if input_data.shape[0] != self.input_layer_size:\n raise IndexError(f\"Input data length is {input_data.shape[0]}, must match length of input layer size {self.input_layer_size}\")\n\n # Evaulate hidden layer given input values\n hidden_layer_values = np.zeros(self.hidden_layer_size, dtype=np.float32)\n for hidden_node_index in range(self.hidden_layer_size):\n node_value = 0\n for input_node_index in range(self.input_layer_size):\n node_value += input_data[input_node_index] * self.input_to_hidden_weights[input_node_index, hidden_node_index]\n hidden_layer_values[hidden_node_index] = sigmoid(node_value + self.hidden_layer_biases[hidden_node_index])\n\n # Evaulate output layer given hidden layer values\n output_layer_values = np.zeros(self.output_layer_size, dtype=np.float32)\n for output_node_index in range(self.output_layer_size):\n node_value = 0\n for hidden_node_index in range(self.hidden_layer_size):\n node_value += hidden_layer_values[hidden_node_index] * self.hidden_to_output_weights[hidden_node_index, output_node_index]\n output_layer_values[output_node_index] = sigmoid(node_value + self.output_layer_biases[output_node_index])\n\n return output_layer_values", "def neurons_output(self, x, w):\n k = w.shape[0]\n n = x.shape[0]\n output = np.zeros((n, k))\n for i in range(n):\n for j in range(k):\n output[i][j] = np.exp(-(x[i]-w[j])**2)/2\n return output", "def output(self, inputs):\n self._in_j = self._input(inputs) #Previous weighted inputs\n return self._g(self._in_j)", "def _objective(self,output, expected,device):\r\n #\r\n #output = output.to(device = expected.device) #\r\n \r\n output = torch.where(output == 0,self.saf,output)\r\n #output = torch.where(output == 1,torch.tensor([0.999999],device = device),output)\r\n out1 = torch.mul(-expected,torch.log(output))\r\n #out2 = torch.mul(expected-1,torch.log(1 - output))\r\n #out = torch.add(out1,out2)\r\n return torch.sum(out1, dim = 1)", "def _learn_node_parameter_var(outputs, weights, inputs):\n var = 0.\n\n \"\"\" YOUR CODE HERE \"\"\"\n temp = 0\n N_observe = outputs.shape[0]\n if inputs is None:\n temp = np.sum((outputs-weights[0])**2)\n else:\n for i in range(N_observe):\n temp += (outputs[i] - (np.sum(weights[1:] * inputs[i]) +weights[0]))**2\n var = temp/N_observe\n\n\n\n \"\"\" END YOUR CODE HERE \"\"\"\n\n return var", "def compute_output_delta(self, target):\r\n self.compute_activation\r\n out=self.activation\r\n self.delta=out*(1-out)*(target-out)", "def test_find_highest_value_node_last(self):\n nn = NeuralNet(0, 0, '', '', blank=True)\n nn.create_net(2, 2, 2, 2)\n nn.eta = 0.1\n\n # Override weights to static value for reproducibility\n for node in nn.layers[2].nodes:\n node.weights = [0.6, 0.6]\n\n nn.layers[3].nodes[0].weights = [0.0, 0.0]\n nn.layers[3].nodes[1].weights = [1.0, 1.0]\n\n val = nn.assign_output([2, 3], test=True)\n self.assertEqual(val, '01')", "def get_hidden_values(self):\n\n return T.nnet.sigmoid(T.dot(self.x, self.W) + self.b)", "def get_hidden_output(self):\n\n\t\treturn 
self.activation(\n\t\t\ttheano.tensor.dot(self.symbolic_corrupted_input, self.weights) +\n\t\t\tself.bias)", "def calculate_output(self, input_par):\r\n\r\n raise NotImplementedError", "def get_output_node(self) -> WillumpGraphNode:\n return self.output_node", "def compute(self, example):\n activations = []\n if self.hidden > 0:\n for i in xrange(self.hidden):\n output = self.vis_layer[i].compute(example)\n activations.append(output)\n activations.append(1.0)\n for layer in xrange(self.layers):\n hidden_activations = []\n for i in xrange(self.hidden):\n hidden_activations.append(self.hidden_layers[layer][i].compute(activations))\n hidden_activations.append(1.0)\n activations = hidden_activations\n output = self.output_neuron.compute(activations)\n else:\n output = self.output_neuron.compute(example)\n return Network.threshold(output)", "def value(self): \r\n c = self.nd1() * self.s * math.exp(-self.div * self.t)\r\n c -= self.nd2() * self.x * math.exp(-self.rf * self.t)\r\n \r\n return c", "def execute(self):\n \n self.outvar = self.invar + .01", "def run(self):\n \n #calculate node\n print (\"{} run()\".format(self.getName()))\n\n #feed outputs\n result = 0\n for i in self.getInputPorts():\n v = i.getValue()\n # print v, self.getName()\n if v:\n result += float(v)\n\n for i in self.getOutputPorts(): #for every output port\n i.setValue(result) #set test value\n print (\"Output: {}\".format(i.getValue()))\n\n # print \"\"", "def get_value(self):\n if not self.visited:\n # first visit at node\n self.visited = True\n\n # value calculation\n for node, weight in self.predecessors:\n self.value += (node.get_value() * weight)\n\n # applying activation function\n if self.activation is not None:\n self.activation()\n\n self.calculated = True\n\n return self.value\n else:\n # visited node\n if self.calculated:\n # calculated in this computation\n return self.value\n else:\n # recurrent connection\n return self.past_value", "def __call__(self, output, target, params):\n res = self._loss(output, target, params)\n if self._fact is not None:\n res *= self._fact\n return res", "def calc(self, inputs):\n return [neuron.output(inputs) for neuron in self._neurons]", "def calculate_output(self, input_par):\r\n raise NotImplementedError", "def compute_network_output(self, params, input_data):\n\n feed_dict = dict(zip(self.network_params, params))\n feed_dict[self.X_Minibatch] = input_data\n return self.session.run(self.f_output, feed_dict=feed_dict)", "def output(self):\r\n self.logic ( )\r\n return self.output", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = incoming * self.factor\n return self.out", "def get_state_action_value(self, state, action):\n state_tensor = torch.from_numpy(state).float().to(self.device)\n output = torch.dot(self.weights[action,:],state_tensor.view(-1))\n return output", "def output(self, inputs):\n\n inputs = np.concatenate((inputs,-np.ones((np.shape(inputs)[0],1))),axis=1)\n return self._pcnfwd(inputs)", "def generateOutputs(self):\n return np.sin(np.pi*self.x)", "def output(self):\r\n return self.result", "def output(self):\n\n\t\tself._previousError = self._getErrorFunction()\n\t\n\t\treturn self._proportionalTerm() + self._derivativeTerm() + self._integralTerm()", "def compute_output(self, a_o):\n if self.regression:\n return a_o\n else:\n return 
1/(1+np.exp(-a_o))", "def val_func(self, data, label):\r\n self.net.eval()\r\n\r\n with torch.no_grad():\r\n outputs, losses = self.forward(data, label)\r\n\r\n return outputs, losses", "def get_state_action_value(self, state, action):\n state_tensor = torch.from_numpy(state).float().to(self.device)\n output = torch.dot(self.weights[action,:],state_tensor)\n return output", "def forward_propagate(self):\n for i in range(0, len(self.output_layer)):\n output = 0\n\n # Loop through each Neuron in the hidden layer\n for neuron in self.hidden_layer:\n output += neuron.weights[i] * neuron.output\n\n # Update summation for output classifier\n self.output_layer[i] = output", "def get_hidden_values(self, data):\n return T.nnet.sigmoid(T.dot(data, self.w1) + self.b1)", "def _delta(self, output, err, neuron):\n return neuron._g_prime(output) * err", "def predict(self, output):\n # output = output.float()\n if isinstance(output, list):\n output = output[-1]\n res = t.squeeze(t.mm(output, self.multiplier), dim=1)\n\n return t.round(res)", "def evaluate(self, input):\n\t\treturn self.function(np.dot(self.weights, np.array([-1] + list(input))))", "def _vout0(self,x):\n v0 = self.cool_params['vout0']\n return v0*x**0.23+3", "def __call__(self, inputs: np.ndarray):\n # Denote the impact the inputs have directly on the outputs\n output_inputs: np.ndarray = np.matmul(self.in2out, inputs.transpose()).transpose()\n \n # Denote the impact hidden nodes have on the outputs, if there are hidden nodes\n if self.n_hidden > 0:\n # Nice to know:\n # - np.transpose() will transpose the tensor\n # - np.matmul(tensor1, tensor2) will perform a matrix multiplication between tensor and tensor2\n \n # The activation is defined by:\n # - the inputs mapping to the hidden nodes\n # - the hidden nodes mapping to themselves\n # - the hidden nodes' biases\n \n # 1) Propagate the hidden nodes\n self.hidden_act = self.act_f(np.matmul(self.in2hid, inputs.transpose()).transpose() +\n np.matmul(self.hid2hid, self.hidden_act.transpose()).transpose() +\n self.hidden_biases)\n \n # 2) Execute the RNN nodes if they exists (updating current hidden state)\n for i, rnn_idx in enumerate(self.rnn_idx):\n self.rnn_state[:, i] = self.rnn_array[i](\n np.concatenate((self.in2hid[rnn_idx] * inputs,\n self.hid2hid[rnn_idx] * self.hidden_act),\n axis=1)[self.rnn_map[i]].reshape(self.bs, self.rnn_array[i].input_size)\n )\n self.hidden_act[:, rnn_idx] = self.rnn_state[:, i, 0]\n \n # 3) Propagate hidden-values to the outputs\n output_inputs += np.matmul(self.hid2out, self.hidden_act.transpose()).transpose()\n \n # Define the values of the outputs, which is the sum of their received inputs and their corresponding bias\n self.output_act = self.act_f(output_inputs + self.output_biases)\n return self.output_act", "def get_output(self, **kwargs):\n return self.out", "def neural_net_predict(self, inputs):\n for W, b in self.params:\n outputs = np.dot(inputs, W) + b\n inputs = np.tanh(outputs)\n return outputs # - logsumexp(outputs, axis=1, keepdims=True)", "def value(self, observation, prev_action, prev_reward):\n agent_inputs = buffer_to((observation, prev_action, prev_reward),\n device=self.device)\n _mu, _log_std, value, _rnn_state = self.model(*agent_inputs, self.prev_rnn_state)\n return value.to(\"cpu\")", "def net_input(self,X):\n return np.dot(X,self.w_[1:])+self.w_[0]", "def get_outputs_values(self):\n obsOut = numpy.zeros(self.get_num_outputs())\n i = 0\n for o in self.outputs:\n obsOut[i] = o.read_value_in_fmu(self.fmu)\n i += 1\n return obsOut", 
"def get_outputs(self, rov_id):\n count = 0 # Keeps count of which weight is being applied\n self.reset_layers(rov_id)\n\n # for i in range(self.n_inputs):\n # self.in_layer[rov_id, i] = self.tanh(self.in_layer[rov_id, i])\n\n for i in range(self.n_inputs): # Pass inputs to hidden layer\n for j in range(self.n_nodes):\n self.hid_layer[rov_id, j] += self.in_layer[rov_id, i] * self.weights[rov_id, count]\n count += 1\n\n for j in range(self.n_nodes): # Add Biasing Node\n self.hid_layer[rov_id, j] += (self.input_bias * self.weights[rov_id, count])\n count += 1\n\n for i in range(self.n_nodes): # Pass through sigmoid\n self.hid_layer[rov_id, i] = self.tanh(self.hid_layer[rov_id, i])\n\n for i in range(self.n_nodes): # Pass from hidden layer to output layer\n for j in range(self.n_outputs):\n self.out_layer[rov_id, j] += self.hid_layer[rov_id, i] * self.weights[rov_id, count]\n count += 1\n\n for j in range(self.n_outputs): # Add biasing node\n self.out_layer[rov_id, j] += (self.hidden_bias * self.weights[rov_id, count])\n count += 1\n\n for i in range(self.n_outputs): # Pass through sigmoid\n self.out_layer[rov_id, i] = self.tanh(self.out_layer[rov_id, i])", "def output_layer(self, x):\n if self.adaptive_softmax is None:\n if self.share_input_output_embed:\n x = F.linear(x, self.embed_tokens.weight)\n else:\n x = self.fc_out(x)\n return x", "def outputValue(self):\n string = self.ask('od;E;')\n string = re.sub(r'^(NDCV|NDCA|EDCV)', r'', string)\n self.notify('voltage', float(string))\n return float(string)", "def cost(self, output, labels, weights):\n return tf.multiply(0.5 * tf.square(output - labels), weights)", "def evaluate(self):\n RV = -self.predict()\n RV += self.Ystar()\n return RV", "def output(self, inputs):\n\n # if max pool we need to save the indices for backproping\n if self.max_pool:\n self.indices = self.f(inputs)\n out = np.take(inputs, self.indices)\n else:\n out = self.f(inputs)\n\n # save input\n self.current_input = inputs\n\n return out", "def value(self, observation, prev_action, prev_reward):\n model_inputs = buffer_to((observation, prev_action, prev_reward),\n device=self.device)\n _mu, _log_std, value = self.model(*model_inputs)\n return value.to(\"cpu\")", "def model_output(model, t, s, i):\n return 0, 0, 0, 0", "def output(x_tensor, num_outputs):\n # TODO: Implement Function\n out_w = tf.Variable(tf.truncated_normal([x_tensor.get_shape().as_list()[1], num_outputs],stddev=0.1,dtype=tf.float32))\n out_b = tf.Variable(tf.zeros(num_outputs))\n return tf.matmul(x_tensor,out_w)+out_b", "def _compute_outputs(self, *args, **kwargs):\n pass\n # self.outputs = self.model(input_ids=self.input_ids, masked_lm_labels=self.input_ids)\n # self.logits = self.outputs[0][0]\n # self.probs = torch.softmax(self.logits, 1)", "def evaluate(self):\n # initialize delta_weights\n Loss = 0\n for i, x_test in enumerate(self.X_test):\n Loss += (self.sigmoid(np.dot(self.weights,x_test))-self.y_test[i])**2\n return Loss", "def op_output_values(self):\n return self.solid_output_values", "def output(self, x_tensor, num_outputs):\n shape = x_tensor.get_shape().as_list()\n weights = tf.Variable(tf.truncated_normal([shape[-1], num_outputs], mean=0, stddev=0.01))\n biases = tf.Variable(tf.zeros([num_outputs]))\n logits = tf.add(tf.matmul(x_tensor, weights), biases)\n return logits", "def action(self):\n operator = self.create_operator()\n operator.set_name(self.node_name)\n if not isinstance(operator, zautograd.Variable):\n z_tensor = operator(self.inputs)\n 
operator.set_weights(self.format_params(self.params))\n else:\n z_tensor = operator\n operator.node.element().set_weights(self.format_params(self.params))\n\n self.all_tensors[self.output] = z_tensor # update the all_tensors\n return z_tensor", "def output_layer_activation(x):\n return x", "def compute(self, now, input_value):\n\n # Calculate time change. Return last output if no change.\n time_change = now - self.last_time\n if time_change <= 0:\n return self.output\n\n # Get and update constants.\n kp = self.kp\n ki = self.ki * time_change\n kd = self.kd / time_change\n\n # Compute all the working error variables.\n input_error = self.set_point - input_value\n d_input = input_value - self.last_input\n\n # Remember state for next time.\n self.last_input = input_value\n self.last_time = now\n\n\t# Factor in integral.\n self.output += ki * input_error\n\n\t# Factor in proportional-on-measurement.\n if not self.p_on_e:\n self.output -= kp * d_input\n\n\t# Factor in proportional-on-error.\n if self.p_on_e:\n self.output -= kp * input_error\n\n\t# Factor in derivative.\n self.output -= kd * d_input\n\n\t# Keep outputSum limited to legal values.\n self.output = self.clip_to_output_limits(self.output)\n return self.output", "def action(self, observations):\n # First hidden layer\n z1 = np.dot(self.w1, observations).reshape((2,1)) + self.b1\n a1 = self.relu(z1)\n \n # Second hidden layer\n z2 = np.dot(self.w2, a1) + self.b2\n a2 = self.relu(z2)\n\n # Third layer (output\n z3 = np.dot(self.w3, a2) + self.b3\n a3 = self.tanh(z3)\n \n # Get the output \n return 1 if a3 >= 0 else 0", "def get_output(self, input_arr):\n input_next_layer = []\n for p in self.n_perceptrons: # for each perceptron\n g = p.calculate(input_arr) # get the outpout \n input_next_layer.append(g) # ad to list\n return input_next_layer", "def getOutput(self):\r\n return self._output", "def cross_entropy_cost(output_out, target_out):\r\n total = 0\r\n for target_node in range(len(target_out)): # For each target data set\r\n for output_node in range(len(output_out)): # For each output node\r\n total += target_out[target_node][output_node] - target_out[target_node][output_node] * np.log(output_out[output_node]) - \\\r\n (1 - target_out[target_node][output_node]) * np.log(1 - output_out[output_node])\r\n\r\n total = 1 / total\r\n return total", "def add_output_ops(self, graph, output):\n return output", "def get_output(self, X):\n return logistic(X)", "def output(x_tensor, num_outputs):\n # TODO: Implement Function\n y = tf.layers.dense(x_tensor,num_outputs)\n return y", "def get_output(self, X):\n pass", "def get_output(self, X):\n return ReLU(X)", "def output(x_tensor, num_outputs):\n shape = x_tensor.get_shape().as_list()\n weight = tf.Variable(tf.truncated_normal([shape[-1], num_outputs], stddev=0.1))\n bias = tf.Variable(tf.zeros(num_outputs))\n return tf.add(tf.matmul(x_tensor, weight), bias)", "def evaluate_output(self, output: int) -> Callable[[str], bool]:\n raise NotImplementedError", "def kernel_output(self):\n\t\treturn self.kernel_shape_param('O')", "def getOutputNoActivation(self, inputs):\n if self.bias:\n inputs = np.concatenate((inputs, np.ones((inputs.shape[0], 1))), axis=1)\n self.outputNodes = np.dot(inputs, self.weights.T)\n return self.outputNodes" ]
[ "0.8310454", "0.7989595", "0.79795736", "0.7532791", "0.73399097", "0.70779973", "0.7050204", "0.70237565", "0.6919432", "0.6769719", "0.6748662", "0.6733867", "0.6672669", "0.6672669", "0.6672258", "0.6666061", "0.65460163", "0.6545946", "0.6545946", "0.6528449", "0.64545137", "0.64545137", "0.6451696", "0.6421775", "0.6379829", "0.635811", "0.63330114", "0.632145", "0.6310808", "0.6302511", "0.6294476", "0.62903905", "0.6285026", "0.627647", "0.6265107", "0.62453485", "0.62246627", "0.62185484", "0.6213312", "0.6207519", "0.6204456", "0.6204141", "0.61986244", "0.6196665", "0.6180413", "0.61597645", "0.615341", "0.61277527", "0.6086903", "0.6084994", "0.6078263", "0.6071527", "0.60581374", "0.60561866", "0.6054196", "0.6040433", "0.6034736", "0.6014595", "0.60114723", "0.6007975", "0.60006446", "0.5997087", "0.59892076", "0.59836334", "0.59822464", "0.59804404", "0.5976474", "0.5969289", "0.59666204", "0.5954425", "0.59533715", "0.5952877", "0.59442925", "0.5943878", "0.5939679", "0.593338", "0.59201986", "0.5919574", "0.59086174", "0.58999664", "0.5898833", "0.58984673", "0.58961535", "0.58885473", "0.5885989", "0.5881806", "0.5881161", "0.5876714", "0.5872395", "0.5870583", "0.5870555", "0.58687824", "0.5858485", "0.5856247", "0.5854821", "0.5853661", "0.5839991", "0.5833071", "0.58329505", "0.5831049" ]
0.7061461
6
Compute the weighted sum of input vectors
def compute_wsi(self, input_data, weights = None, theta_weight = None):
    if weights is None or theta_weight is None:
        self.wsi = self.theta_weight
        for index, value in enumerate(input_data):
            self.wsi = self.wsi + ( value * self.input_weights[index] )
    else:
        self.wsi = theta_weight
        for index, value in enumerate(input_data):
            self.wsi = self.wsi + ( value * weights[index] )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def weighted_sum(W, X):\n\n if len(W) != len(X):\n print(\"Dimension of weight vector should be same as input vector.\")\n return\n\n else:\n H = 0\n\n for i in range(len(W)):\n H += (W[i] * X[i])\n \n return H", "def weighted_sum(self, inputs):\r\n weighted_sum = 0\r\n for i in range(self.num_inputs):\r\n weighted_sum += self.weights[i]*inputs[i]\r\n return weighted_sum", "def weighted_sum(self):\n return sum(self.wvalues)", "def sum(self):\n return np.dot(self.data.T, self.weights)", "def _input(self, inputs):\n return sum([w*i for w,i in zip(self._weights, inputs)])", "def dot(v,w):\n return sum(v_i * w_i for v_i, w_i in zip(v,w)\n\ndef sum_of_squares(v):\n return dot(v, v)\n\nimport math", "def wsum(self):\n return reduce(operator.add, self.wvalues, 0.0)", "def _weighted_sum(self, data, sum_func):\n if self.weights.shape != data.shape:\n # Add extra axes to the weights for broadcasting\n weights = np.reshape(self.weights, [len(self.weights), 1, 1])\n else:\n weights = self.weights\n\n # Turns out bn.nansum has an implementation that is not\n # precise enough for float32 sums. Doing this should\n # ensure the sums are carried out as float64\n weights = weights.astype('float64')\n weighted_sum = sum_func(data * weights, axis=0)\n return weighted_sum, weights", "def _eval(self, v):\n return super(weighted_sum_squares, self)._eval(self.weight * v)", "def vector_add(v, w):\n return [v_i + w_i for v_i, w_i in zip(v,w)]", "def weighted_sum(h):\n return h", "def vector_sum(vectors):\n\tresult = vectors[0]\n\tfor vector in vectors:\n\t\tresult = vector_add(result, vector)\n\treturn result", "def vector_add(v, w):\n return [v_i + w_i for v_i, w_i in zip(v, w)]", "def vector_add(v, w):\n\treturn [v_i + w_i for v_i, w_i in zip(v, w)]", "def sumSet(weightedSet):\n\tsum = 0\n\tfor example in weightedSet:\n\t\tsum += example.weight\n\treturn sum", "def sum(cls, vectors):\n result = cls.null()\n for vector in vectors:\n result += vector\n return result", "def compare_sum(values, weights):\n return np.sum(values.numpy())", "def call(self, inputs, mask=None):\n weights = K.expand_dims(inputs[0])\n vectors = inputs[1]\n wtd_vectors = weights * vectors\n wtd_avg = K.sum(wtd_vectors, axis=-2)\n return wtd_avg", "def vector_sum(vectors):\n results = vectors[0]\n for vector in vectors[1:]:\n results = vector_add(results, vector)\n return results", "def sum_of_squares(v: Vector) -> float:\n return dot(v,v)", "def sum_of_squares(v: Vector) -> float:\n return dot(v, v)", "def sum_of_squares(v: Vector) -> float:\n return dot(v, v)", "def sum_of_squares(v: Vector) -> float:\n return dot(v, v)", "def dot(v,w):\n return sum(v_i * w_i for v_i,w_i in zip(v,w))", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def forward(self, weights):\n\n return (np.sum(np.square(weights))) * (self.lambd / 2)", "def dot(v,w):\r\n return sum(v_i * w_i\r\n for v_i, w_i in zip(v, w))", "def dot_product(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v,w):\n return sum(v_i * w_i\n for v_i, w_i in zip(v,w))", "def calcUnitWeight(vect, val, weights, out, wcol=None):\n if not wcol:\n wcol = out\n\n grass.run_command('v.rast.sum', zones=vect, _input=weights, column=wcol)\n grass.run_command('v.db.update', column=wcol, 
value=val+'/'+wcol,\n where='wcol>0')\n grass.run_command('v.to.rast', _input=vect, use='attr', \n column=wcol, output=out)\n grass.run_command('v.db.dropcol', input=vect, column=wcol)", "def dot(v, w):\n\treturn sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v: Vector, w: Vector) -> float:\n assert len(v) == len(w), \"vectors must be same length\"\n\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def sum_squares(v):\n\treturn dot(v, v)", "def constraint_sum(w):\n return sum(w) - 1", "def dot(v, w):\n return sum(v_i * w_i\n for v_i, w_i in zip(v, w))", "def vector_sum(a, b):\n return a[0] + b[0], a[1] + b[1]", "def dot(v: Vector, w: Vector) -> float:\n assert len(v) == len(w), 'vectors must be the same length'\n\n return sum(v_item * w_item for v_item, w_item in zip(v, w))", "def weight_sum(self) -> Tuple[Tensor, Tensor]:\n if not self._isfit:\n return 0.0, 0.0\n e1_sum = 0.0\n e2_sum = 0.0\n for embedding in self.embedding:\n e1_sum += embedding.weight.abs().sum()\n e2_sum += (embedding.weight ** 2).sum()\n return e1_sum, e2_sum", "def calculate_weighted_results():\n pass", "def update_weights(self, pre_exist_words, hs, negative, wv, sentences,\n nonce, replication=False, sum_over_set=False,\n weighted=False, beta=1000):\n logger.info('updating layer weights')\n gained_vocab = len(wv.vocab) - len(wv.vectors)\n # newvectors = empty((gained_vocab, wv.vector_size), dtype=REAL)\n newvectors = np.zeros((gained_vocab, wv.vector_size), dtype=np.float32)\n\n # randomize the remaining words\n # FIXME as-is the code is bug-prone. We actually only want to\n # initialize the vector for the nonce, not for the remaining gained\n # vocab. This implies that the system should be run with the same\n # min_count as the pre-trained background model. Otherwise\n # we won't be able to sum as we won't have vectors for the other\n # gained background words\n if gained_vocab > 1:\n raise Exception('Creating sum vector for non-nonce word. Do '\n 'not specify a min_count when running Nonce2Vec.')\n if gained_vocab == 0:\n raise Exception('Nonce word \\'{}\\' already in test set and not '\n 'properly deleted'.format(nonce))\n for i in xrange(len(wv.vectors), len(wv.vocab)):\n # Initialise to sum\n raw_ctx, filtered_ctx = self.info.filter_sum_context(\n sentences, pre_exist_words, nonce)\n if sum_over_set or replication:\n raw_ctx = set(raw_ctx)\n filtered_ctx = set(filtered_ctx)\n logger.debug('Summing over set of context items: {}'\n .format(filtered_ctx))\n if weighted:\n logger.debug('Applying weighted sum') # Sum over positive cwi words only\n ctx_ent_map = self.info.get_ctx_ent_for_weighted_sum(\n sentences, pre_exist_words, nonce)\n if filtered_ctx:\n for w in filtered_ctx:\n # Initialise to sum\n if weighted:\n # hacky reuse of compute_cwi_alpha to compute the\n # weighted sum with cwi but compensating with\n # beta for narrow distrib of cwi\n newvectors[i-len(wv.vectors)] += wv.vectors[\n wv.vocab[w].index] * compute_cwi_alpha(\n ctx_ent_map[w], kappa=1, beta=beta, alpha=1,\n min_alpha=0)\n else:\n newvectors[i-len(wv.vectors)] += wv.vectors[\n wv.vocab[w].index]\n # If no filtered word remains, sum over everything to get 'some'\n # information\n else:\n logger.warning(\n 'No words left to sum over given filter settings. 
'\n 'Backtracking to sum over all raw context words')\n for w in raw_ctx:\n # Initialise to sum\n newvectors[i-len(wv.vectors)] += wv.vectors[\n wv.vocab[w].index]\n\n # Raise an error if an online update is run before initial training on\n # a corpus\n if not len(wv.vectors):\n raise RuntimeError('You cannot do an online vocabulary-update of a '\n 'model which has no prior vocabulary. First '\n 'build the vocabulary of your model with a '\n 'corpus before doing an online update.')\n\n wv.vectors = np.vstack([wv.vectors, newvectors])\n if negative:\n self.syn1neg = np.vstack([self.syn1neg,\n np.zeros((gained_vocab,\n self.layer1_size),\n dtype=np.float32)])\n wv.vectors_norm = None\n\n # do not suppress learning for already learned words\n self.vectors_lockf = np.ones(len(wv.vocab),\n dtype=np.float32)", "def sum_of_squares(vector):\n return dot(vector, vector)", "def dot(v, w):\n l = list(zip(v, w))\n return sum(v_i * w_i for v_i, w_i in l)", "def dot(self,v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def vector_weighted_average(vf, weights):\n weights_sum = weights.sum()\n y_average = (vf[:,:,0] * weights).sum() / weights_sum\n x_average = (vf[:,:,1] * weights).sum() / weights_sum\n return np.array([y_average, x_average])", "def weighted_by_sum(\n self, other):\n provenance = NQExprProvenance(\n operation='weighted_by_sum',\n inner=self.provenance,\n other=other.provenance)\n with tf.name_scope('weighted_by_sum'):\n return self.context.as_nql(\n self.tf * tf.reduce_sum(input_tensor=other.tf, axis=1, keepdims=True),\n self._type_name, provenance)", "def update_weights_sum(self):\n vals = self.nn.get_param_values()\n # only use the last layer for summation (w, b)\n self.w_sum = np.sum(vals[-2]) + np.sum(vals[-1])", "def weighted_average(listofvalues):\n total = 0\n weights = 0\n for [w, v] in listofvalues:\n total += w*v\n weights += w\n return total/weights", "def get_weights_sum(self):\n return self.w_sum", "def ComputeWeights(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def sumsquares(self):\n return np.dot((self.demeaned ** 2).T, self.weights)", "def SumM(v:'value', e:'error', w:'weight'=None):\n\n v = np.array(v)\n e = np.array(e)\n\n n = len(v)\n assert len(v) == len(e) \n if w is None:\n w = np.array([1.]*len(v))\n else:\n assert len(w) == len(v)\n w = np.array(w) / e**2\n wt = np.sum(w)\n w2t = np.sum(w**2)\n wti = 1/np.sum(w)\n yw = np.sum(w * v) * wti\n Qw = np.sum(w * (v - yw) ** 2)\n d2 = max(0, (Qw - (n-1)) / (wt - w2t*wti))\n wx = 1 / (e**2 + d2)\n wxti = 1 / np.sum(wx)\n a = np.sum(wx * v) * wxti\n e2 = wxti\n return a, np.sqrt(e2)", "def weighted_sum(data, dim=None, weights=None):\n if isinstance(data, xr.DataArray):\n return weighted_sum_da(data, dim, weights)\n elif isinstance(data, xr.Dataset):\n return weighted_sum_ds(data, dim, weights)\n else:\n raise ValueError('Data must be an xarray Dataset or DataArray')", "def apply_weights(self):\n w0_array = np.ones(self.N)*self.w0\n return w0_array + self.X.dot(self.w)", "def sum_of_squares(v):\n return dot(v, v)", "def sum_of_squares(v):\n return dot(v, v)", "def sum_of_squares(v):\n return dot(v, v)", "def sum_of_squares(v):\n return dot(v, v)", "def sum_of_squares(v):\n return dot_product(v, v)", "def mlp_weight_sum(self) -> Tuple[Tensor, Tensor]:\n if self._model:\n return self._model.mlp_weight_sum()\n return torch.tensor([0.0]), torch.tensor([0.0])", "def calcweighted(store):\n nobs = store['yvec'].shape[0]\n store['Upper'].put(-store['rho'], range(0, nobs - 1), range(1, nobs))\n 
store['Upper'].matvec(store['yvec'], store['yvectil'])\n for i in xrange(store['xmat'].shape[1]):\n store['Upper'].matvec(store['xmat'][:, i], store['xmattil'][:, i])", "def normBySum(vector):\n\treturn np.divide(vector,float(sum(vector)))", "def weights(self) -> List[float]:", "def weighted_sum_ds(ds, dim=None, weights=None):\n if weights is None:\n warn('Computing sum using equal weights for all data points')\n return ds.sum(dim)\n else:\n ds.apply(weighted_sum_da, dim=dim, weights=weights)", "def update_weights(self, weight_delta):\n\n self._weights = math_util.vector_sum(self._weights, weight_delta)", "def add(v: Vector, w: Vector) -> Vector:\n assert len(v) == len(w), 'both vectors must have the same length'\n\n return [v_item + w_item for v_item, w_item in zip(v, w)]", "def custom_np_function(vector):\n summation = 0\n for item in vector:\n summation += item ** 2\n return summation", "def totalWeighting(distance, count, data, n):\n\n weighting = (data)*(distance)*count\n weighting = weighting/(np.sum(np.sum(weighting))) \n return weighting", "def wedge_distance(u, v):\n n_it = np.size(u)\n sum = 0\n for i in range(1, n_it):\n for j in range(i):\n sum += np.abs(u[i] * v[j] - u[j] * v[i]) ** 2\n return sum", "def wsum_rvs(mu: np.ndarray, cov: np.ndarray, w: np.ndarray\n ) -> (np.ndarray, np.ndarray):\n mu1 = mu * w # type: np.ndarray\n ndim = mu1.ndim\n # not using axis=-1, to make it work with DataFrame and Series\n mu1 = mu1.sum(axis=ndim - 1)\n cov1 = (cov * (w[..., None] * w[..., None, :])\n ).sum(axis=ndim).sum(axis=ndim - 1)\n return mu1, cov1", "def l1(weights):\n\treturn np.sum(np.abs(weights))", "def sum_of_squares(v):\n return sum(v_i * v_i for v_i in v)", "def nodalSum(val,elems,work,avg):\n nodes = unique1d(elems)\n for i in nodes:\n wi = where(elems==i)\n vi = val[wi]\n if avg:\n vi = vi.sum(axis=0)/vi.shape[0]\n else:\n vi = vi.sum(axis=0)\n val[wi] = vi", "def fsum(items):\n return math.fsum(items)", "def vector_sum(vectors: List[Vector]) -> Vector:\n assert vectors, 'no vectors provided'\n\n num_elements = len(vectors[0])\n assert all(\n len(v) == num_elements for v in vectors), 'vectors must be the same length'\n\n return [sum(vec[i] for vec in vectors) for i in range(num_elements)]", "def sumsq(values):\n\n return sum(map(lambda x: x ** 2, values))", "def weighted_sum(items: list[T], weights: Sequence[float | None] = cast(Sequence[Optional[float]], None)) -> T:\n\n if weights is None:\n weights = [None] * len(items)\n\n assert len(items) == len(weights) > 0\n elem = items[0]\n unsupported_msg = 'Unsupported element type in weighted sum: {}. Value is: {}'\n\n if isinstance(elem, str):\n # Need to check this first. Otherwise it goes into sequence and causes infinite recursion.\n raise TypeError(unsupported_msg.format(type(elem), elem))\n\n try:\n if isinstance(elem, (torch.Tensor, np.ndarray, float, int, np.number)):\n if weights[0] is None:\n res = elem\n else:\n res = elem * weights[0]\n for it, weight in zip(items[1:], weights[1:]):\n if type(it) != type(elem):\n raise TypeError(f'Expect type {type(elem)} but found {type(it)}. 
Can not be summed')\n\n if weight is None:\n res = res + it # type: ignore\n else:\n res = res + it * weight # type: ignore\n return cast(T, res)\n\n if isinstance(elem, Mapping):\n for item in items:\n if not isinstance(item, Mapping):\n raise TypeError(f'Expect type {type(elem)} but found {type(item)}')\n if set(item) != set(elem):\n raise KeyError(f'Expect keys {list(elem)} but found {list(item)}')\n return cast(T, {\n key: weighted_sum(cast(List[dict], [cast(Mapping, d)[key] for d in items]), weights) for key in elem\n })\n if isinstance(elem, Sequence):\n for item in items:\n if not isinstance(item, Sequence):\n raise TypeError(f'Expect type {type(elem)} but found {type(item)}')\n if len(item) != len(elem):\n raise ValueError(f'Expect length {len(item)} but found {len(elem)}')\n transposed = cast(Iterable[list], zip(*items)) # type: ignore\n return cast(T, [weighted_sum(column, weights) for column in transposed])\n except (TypeError, ValueError, RuntimeError, KeyError):\n raise ValueError(\n 'Error when summing items. Value format / shape does not match. See full traceback for details.' +\n ''.join([\n f'\\n {idx}: {_summarize_elem_format(it)}' for idx, it in enumerate(items)\n ])\n )\n\n # Dealing with all unexpected types.\n raise TypeError(unsupported_msg)", "def weighted_average(items, weights):\n assert len(items) > 0\n assert len(items) == len(weights)\n # declare total as the return value which is a decimal\n total = 0.0\n # for all pairs from two lists\n for i in range(len(items)):\n \t# we increment the total for the product of both value\n \ttotal += items[i] * weights[i]\n # we return the total divided by sum of weights\n return total / sum(weights)", "def lpSum(vector):\n\treturn LpAffineExpression().addInPlace(vector)", "def l2(weights):\n\treturn np.sqrt(np.sum(weights * weights))", "def axon_volume_cost(W, D):\n # Make sure self-weights are set to zero\n np.fill_diagonal(W,0)\n # Calculate cost by summing weights with distances\n return (np.triu(W)*D).sum()", "def test_amp_sums_can_be_simplified(free_alg):\n dr = free_alg\n v = dr.names.v\n n, i, j = symbols('n i j')\n x = IndexedBase('x')\n r = Range('D', 0, n)\n\n tensor = dr.sum((i, r), (j, r), i ** 2 * x[j] * v[j])\n res = tensor.simplify_sums()\n assert res == dr.sum((j, r), (\n n ** 3 / 3 - n ** 2 / 2 + n / 6\n ) * x[j] * v[j])", "def test_weight_sums(dimensions, specification, size):\n weights = Integration(specification, size)._build(dimensions)[1]\n np.testing.assert_allclose(weights.sum(), 1, rtol=0, atol=1e-12)", "def squared_distance(v: Vector, w: Vector) -> float:\n return sum_of_squares(subtract(v, w))", "def add_vectors(v, u):\n return (v[0] + u[0], v[1] + u[1])", "def vector_sum(vectors: List[Vector]) -> Vector:\n # Check that vectors is not empty\n assert vectors, \"no vectors provided!\"\n\n # Check the vectors are all the same size\n num_elements = len(vectors[0])\n assert all(len(v) == num_elements for v in vectors), \"different sizes!\"\n\n return [sum(vector[i] for vector in vectors)\n for i in range(num_elements)]", "def eval(self,invec):\n wsum = sum(W * i for W,i in zip(self.weights,invec))\n wsum += self.weights[self.Nvars]\n\n self.afun = self.aFun(wsum)\n self.dafundz = self.daFunDz()\n\n return [self.afun, self.dafundz]", "def vec_product(vec1: List[int], vec2: List[int]) -> int:\n return sum(map(lambda v1, v2: v1 * v2, vec1, vec2))", "def apply_weights(self):\n return self.X.dot(self.get_weights())", "def f(t, x, n, v):\n total = 0\n for i in range(n+1):\n for j in range(n+1):\n for k in 
range(v):\n total = t[i][j] * x[i][j][k]", "def normalize_weights(self, w):\n n = w.astype(np.float64, copy=True)\n c = float(np.sum(w))\n n /= c\n return n", "def multiply_ggn_unweighted(self, vector: jnp.ndarray) -> jnp.ndarray:\n pass", "def vector_sum(vectors: List[Vector]) -> Vector:\n # Check that vectors is not empty\n assert vectors, \"no vectors provided!\"\n\n # Check the vectors are all the same size\n num_elements = len(vectors[0])\n assert all(len(v) == num_elements for v in vectors), \"different sizes!\"\n\n # the i-th element of the result is the sum of every vector[i]\n return [sum(vector[i] for vector in vectors)\n for i in range(num_elements)]", "def _get_normalize_vec(self, feat_vec, weights=None):\n if weights is None:\n weights = self.weights\n return array([row[0] + row[feat_vec].sum() for row in weights])", "def calc_weight(base):\n return weights[base] + sum([calc_weight(i) for i in leafs[base]])", "def sum_of_squares(v):\n return sum(v_i ** 2 for v_i in v)", "def sum_of_squares(v):\n return sum(v_i ** 2 for v_i in v)", "def sum_of_squares(v):\n return sum(v_i ** 2 for v_i in v)" ]
[ "0.82214683", "0.798617", "0.7376457", "0.7061534", "0.70244205", "0.70015985", "0.6983011", "0.6930713", "0.692332", "0.6872867", "0.68701", "0.686605", "0.6858394", "0.68492305", "0.6845547", "0.6835387", "0.6827912", "0.68278885", "0.6817227", "0.6780031", "0.672747", "0.672747", "0.672747", "0.6690922", "0.6670267", "0.6670267", "0.6670267", "0.6670267", "0.6670267", "0.6661061", "0.6656419", "0.66435766", "0.6635361", "0.657991", "0.65684277", "0.6551647", "0.6531432", "0.65187144", "0.65114444", "0.65075034", "0.649535", "0.6490026", "0.6477208", "0.64717543", "0.6468077", "0.64454234", "0.6443768", "0.6411232", "0.63931644", "0.6389736", "0.6383202", "0.63771784", "0.63665473", "0.6350941", "0.63403094", "0.63222075", "0.6312601", "0.629721", "0.629721", "0.629721", "0.629721", "0.6290399", "0.62882894", "0.62827575", "0.62826955", "0.6264795", "0.6262865", "0.6243242", "0.61804926", "0.61724865", "0.6159581", "0.61454636", "0.614262", "0.6141399", "0.6136192", "0.61339843", "0.61306286", "0.6129994", "0.61299664", "0.6120077", "0.6109483", "0.60941875", "0.60851395", "0.60756546", "0.6070086", "0.60534054", "0.6050409", "0.60399365", "0.6038999", "0.6011231", "0.6010079", "0.600794", "0.59984034", "0.59901947", "0.59819174", "0.59765124", "0.59748244", "0.59723616", "0.59691346", "0.59691346", "0.59691346" ]
0.0
-1
Update weights based on last input/output and learning rate
def update_weights(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __update(self, learning_rate):\n for layer in self.layers:\n layer.weights.set_value((layer.weights - learning_rate * layer.dW).eval())\n layer.biases.set_value((layer.biases - learning_rate * layer.db).eval())", "def update_weights(self):\n\n\n self.w += self.learn_rate * (self.X.T.dot(self.T - self.Y)\n - self.reg_L1 * np.sign(self.w)\n - self.reg_L2 * 2*self.w)", "def update_weights(x_train, y_train, weights, learning_rate):\r\n predictions = compute_prediction(x_train, weights)\r\n weights_delta = np.dot(x_train.T, y_train - predictions)\r\n m = y_train.shape[0]\r\n weights += learning_rate / float(m) * weights_delta\r\n return weights", "def update_recurrent_weights_step(self):\n \n # update weights: hebbian term\n self.delta_Wee=self.learn_rate*(self.rr[0:self.N_e]-self.input_mean)*\\\n (self.rr[0:self.N_e].T-self.input_mean)\n \n self.W_ee+=self.dt*self.delta_Wee\n\n # update weights: normalize to fixed mean of incoming and outgoing weights\n self.W_ee-=(self.W_ee.mean(axis=1)-self.W_av_star)[:,np.newaxis]\n self.W_ee-=(self.W_ee.mean(axis=0)-self.W_av_star)[np.newaxis,:]\n \n # clip weights \n self.W_ee=np.clip(self.W_ee,0,self.W_max_ee)\n \n # update excitatory weights in the big weight matrix\n self.W[:self.N_e,:self.N_e]=self.W_ee", "def updateWeights(inputs, outputs, learning_rate, y, weights):\n for i in range(len(weights)):\n weights[i] = weights[i] + learning_rate * (outputs - y) * inputs[i]\n return weights", "def update_weights(self, X, Y, learning_rate):\n grads = self.calculate_gradients(X, Y)\n #update weights and biases\n self.weights[0] = self.weights[0] - learning_rate * grads[\"dW1\"]\n self.weights[1] = self.weights[1] - learning_rate * grads[\"dW2\"]\n self.biases[0] = self.biases[0] - learning_rate * grads[\"db1\"]\n self.biases[1] = self.biases[1] - learning_rate * grads[\"db2\"]", "def update_params(self, learning_rate=0.1):\n\n self.params['W'] = self.params['W'] - learning_rate * self.dW # update weights\n self.params['b'] = self.params['b'] - learning_rate * self.db # update bias(es)", "def update_weights(self):\n activation, activation_p = self.feedforward()\n # initialize delta_weights\n delta_w = np.zeros(2)\n # simultaneous calculate delta_weights\n for i, element in enumerate(self.y):\n delta_w += (activation[i]-element)*(activation_p[i])*self.X[i]\n # update weight\n self.weights -= self.alfa*delta_w", "def weights_update(self, previous_layer_output, learning_rate, l2_lambda, batch_size):\n theta, b, delta, alpha = self.theta, self.b, self.delta, learning_rate\n dc_dtheta = np.dot(previous_layer_output, delta.T).transpose()\n dc_dtheta = np.divide(1, batch_size) * dc_dtheta\n new_theta = theta*(1 - l2_lambda * alpha) - alpha * dc_dtheta\n b_prime = np.sum(delta, axis=1).reshape(b.shape)\n b_prime = b_prime * np.divide(1, batch_size)\n new_b = b - alpha * b_prime\n self.theta, self.b = new_theta, new_b", "def update_parameters(self, learning_rate):\n for i in range(self.L - 1):\n self.W[i] -= learning_rate * self.dW[i]\n self.b[i] -= learning_rate * self.db[i]", "def pull_weights(self, learning_rate):\n for w in self.weights:\n w.value += learning_rate * w.gradient\n # Reset all the weights' gradient to 0\n # We will not reset all other units' gradient, because all other units should be initialized in next training\n # round, and the init value of gradient is 0\n for w in self.weights:\n w.gradient = 0", "def update_weights(self):\n self._weights = self._weights + self.update_weights_value", "def update(self):\n for filter in self.filters:\n 
filter.update(self.learning_rate)", "def update_params(self, learning_rate):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\t\tself._W = self._W - learning_rate * self._grad_W_current\n\t\tself._b = self._b - learning_rate * self._grad_b_current\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def update_weights(net, input_values, desired_output, neuron_outputs, r=1):\n raise NotImplementedError", "def updatelearningrate(self, epoch):\n self.lr = getlearningrate(epoch=epoch, opt=self.opt)\n # update learning rate of model optimizer\n if isinstance(self.model, list):\n count = 0\n for param_group in self.optimzer.param_groups:\n # if type(model) is <list> then update modules with different learning rate\n param_group['lr'] = self.lr\n count += 1\n # print \">>> count is:\", count-1\n else:\n for param_group in self.optimzer.param_groups:\n param_group['lr'] = self.lr", "def _update_learning_rate(self):\r\n\r\n self.n_steps += 1\r\n lr = self.factor * self._get_lr_scale()\r\n for param_group in self._optimizer.param_groups:\r\n param_group['lr'] = lr", "def update_learning_rate(self) -> None:\n self.epsilon = self.initial_epsilon / (1. + self.rate_decay * self.n_it)\n return", "def update_learning_rate(self) -> None:\n self.epsilon = self.initial_epsilon / (1. + self.rate_decay * self.n_it)\n return", "def update_weights(self):\n self._weights = self._weights + self.update_weights_value\n self.weights_clipping()", "def update_weights(self):\n\n self.weights -= self.loss_grads\n self.loss_grads = np.zeros(self.weights.shape)", "def update_speed_weights_step(self):\n \n weights_list = [self.W_speed_east, self.W_speed_west,self.W_speed_north,self.W_speed_south]\n speed_input_list = [self.speed_inputs_east,self.speed_inputs_west,\n self.speed_inputs_north,self.speed_inputs_south]\n \n if self.use_eight_directions is True:\n weights_list+=[self.W_speed_north_east,\n self.W_speed_north_west,self.W_speed_south_east,self.W_speed_south_west]\n \n speed_input_list+=[self.speed_inputs_north_east,self.speed_inputs_north_west, \n self.speed_inputs_south_east,self.speed_inputs_south_west]\n\n \n for weights,speed_input in zip(weights_list,speed_input_list):\n \n \n weight_update=speed_input*(self.rr[:self.N_e]-self.input_mean)*(self.rr_e_trace.T-self.input_mean)\n weights+=self.learn_rate_speed_weights*weight_update\n\n\n # normalize to fixed mean of incoming and outgoing weights\n weights-=(weights.mean(axis=1)-self.W_av_star)[:,np.newaxis]\n weights-=(weights.mean(axis=0)-self.W_av_star)[np.newaxis,:]\n \n # clip weights \n np.clip(weights,0,self.W_max_e,out=weights)", "def __update_weights_grad_desc(self, x_train, y_train):\n\n predictions = self.__compute_prediction(x_train)\n weights_delta = np.dot(x_train.T, y_train - predictions)\n\n m = y_train.shape[0]\n self.__weights += self.__learning_rate / float(m) * weights_delta", "def update_weights(self, BMU, currentIteration, input_data, lambda1):\n # Learning rate selection for each epoch\n lr = self.currentLearningRate(currentIteration, lambda1)\n \n # Neighborhood radius selection for each epoch\n radius = self.currentNeighbourhoodRadius(currentIteration, lambda1)\n \n # Iterating through randomly initialized weights and update weights\n for i in range(len(self.weights[0])):\n 
for j in range(len(self.weights)):\n tmpDist = np.power(BMU[0] - i, 2) + np.power(BMU[1] - j, 2)\n theta = np.exp(-tmpDist / (2*np.power(radius, 2)))\n for k in range(self.input_dimension):\n self.weights[i][j][k] = self.weights[i][j][k] + lr * theta * (input_data[k] - self.weights[i][j][k])", "def update_weights(self, gradient, n):\n lr_decay = self.learning_rate*(1/2)*(n/100)\n self.delta_weights = lr_decay * gradient + self.momentum*self.delta_weights\n self.weights += self.delta_weights", "def update_learning_rate(self):\n self.scheduler.step()\n lr = self.optimizer.param_groups[0]['lr']\n print('learning rate = %.7f' % lr)", "def adjust_weights(weights, target, learn_rate):\r\n\r\n for w in range(0, len(target)):\r\n weights[w] += learn_rate * (target[w] - weights[w])", "def train(self, inputs, desired):\n inputs.append(1) # bias input\n guess = self.feedforward(inputs)\n error = desired - guess\n for i in range(len(self.weights)):\n self.weights[i] = self.weights[i] + \\\n self.learning_rate * error * inputs[i]", "def update_weights(self):\r\n\r\n inedges=self.in_edges\r\n for edge in inedges:\r\n weight=edge.weight+self.learning_rate*self.delta*(edge.source.activation)\r\n edge.change_weight(weight)", "def update_layers(self):\n\n # Para cada layer atualiza utilizando o gradiente descendente e o learning rate\n for layer in self.layers:\n layer.update_layer(self.learning_rate)", "def update(self):\n\n self._eps_count += 1\n if self._replay.size >= self._min_replay_size:\n for _ in range(self._learning_updates):\n samples_indices, minibatch = self._replay.sample(self._batch_size)\n tf_minibatch = [tf.constant(mat, dtype=tf_type) for mat, tf_type in zip(minibatch, [tf.float32, tf.int32, tf.float32, tf.float32, tf.float32])]\n self._learn(*tf_minibatch)\n\n self._learn_iter_counter += 1\n if (self._target_update_period > 1) and (self._learn_iter_counter % self._target_update_period == 0):\n self._update_target_nets()", "def update_learning_rate(self, it):\n self.scheduler.step()\n for param_group in self.optimizer.param_groups:\n v = param_group['lr']\n self.tb_logger.add_scalar('train/lr', v, it)", "def _update_weights(self, xi, target):\n output = self.net_input(xi)\n error = (target - output)\n self.w_[1:] += self.eta * xi.dot(error)\n self.w_[0] += self.eta * error\n cost = 0.5 * error**2\n return cost", "def _update_weights(self, xi, target):\n output = self.net_input(xi)\n error = (target - output)\n self.w_[1:] += self.eta * xi.dot(error)\n self.w_[0] += self.eta * error\n cost = 0.5 * error**2\n return cost", "def update_step(image_batch, label_batch, model, learning_rate):\n f = model.forward(image_batch)\n gradient = model.backward(f,label_batch)\n model.w = model.w - learning_rate*gradient", "def update_model(self) -> torch.Tensor:\n # PER needs beta to calculate weights\n samples = self.memory.sample_batch(self.beta)\n weights = torch.FloatTensor(\n samples[\"weights\"].reshape(-1, 1)\n ).to(self.device)\n indices = samples[\"indices\"]\n \n # 1-step Learning loss\n elementwise_loss = self._compute_dqn_loss(samples, self.gamma)\n \n # PER: importance sampling before average\n loss = torch.mean(elementwise_loss * weights)\n \n # N-step Learning loss\n # we are gonna combine 1-step loss and n-step loss so as to\n # prevent high-variance. 
The original rainbow employs n-step loss only.\n if self.use_n_step:\n gamma = self.gamma ** self.n_step\n samples = self.memory_n.sample_batch_from_idxs(indices)\n elementwise_loss_n_loss = self._compute_dqn_loss(samples, gamma)\n elementwise_loss += elementwise_loss_n_loss\n \n # PER: importance sampling before average\n loss = torch.mean(elementwise_loss * weights)\n\n self.optimizer.zero_grad()\n loss.backward()\n clip_grad_norm_(self.dqn.parameters(), 10.0)\n self.optimizer.step()\n \n # PER: update priorities\n loss_for_prior = elementwise_loss.detach().cpu().numpy()\n new_priorities = loss_for_prior + self.prior_eps\n self.memory.update_priorities(indices, new_priorities)\n \n # NoisyNet: reset noise\n self.dqn.reset_noise()\n self.dqn_target.reset_noise()\n\n return loss.item()", "def _update_weights(self, xi, target):\n output = self.net_input(xi)\n error= (target - output)\n self.w_[1:] += self.eta * xi.dot(error)\n self.w_[0] += self.eta * error\n cost =0.5* error**2\n return cost", "def update_learning_rate(self) -> None:\n optimizer = list(self.optimizers.values())[0]\n old_lr = optimizer.param_groups[0]['lr']\n for name, scheduler in self.schedulers.items():\n if name == 'generator' and self.opt.generator_scheduler_name == 'plateau':\n scheduler.step(self.metric)\n elif name == 'discriminator' and self.opt.discriminator_scheduler_name == 'plateau':\n scheduler.step(self.metric)\n else:\n scheduler.step()\n\n lr = optimizer.param_groups[0]['lr']\n print('learning rate %.7f -> %.7f' % (old_lr, lr))\n return", "def update_learning_rate(self):\r\n self.scheduler.step(self.clock.epoch)", "def update_params(self, learning_rate):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\n\t\tfor layer in self._layers:\n\t\t\tlayer.update_params(learning_rate)\n\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def step_and_update_lr(self):\r\n self._update_learning_rate()\r\n self._optimizer.step()", "def update_weights_sum(self):\n vals = self.nn.get_param_values()\n # only use the last layer for summation (w, b)\n self.w_sum = np.sum(vals[-2]) + np.sum(vals[-1])", "def update(self, values, train, eta=.1):\n\t\tfor X, y_true in zip(values, train):\n\t\t\tprediction = self.activate(X)\n\t\t\terror = y_true - prediction\n\t\t\tweight_update = error * eta * X\n\t\t\tself.weights += weight_update", "def update_learning_rate(self):\n for scheduler in self.schedulers:\n if self.opt.lr_policy == 'plateau':\n scheduler.step(self.metric)\n else:\n scheduler.step()\n\n lr = self.optimizers[0].param_groups[0]['lr']\n print('learning rate = %.7f' % lr)", "def update_weights(self, example):\n pred = self.predict(example)\n if pred != example.label:\n self.weights[example.label] = self.weights[example.label] + example.fvector\n self.weights[pred] = self.weights[pred] - example.fvector", "def _determine_new_weight(self, weight, input, currentNeuron, bmu):\n return weight \\\n + (self.neighborhood.fn(currentNeuron, bmu) \\\n * self.learning_rate * (input - weight))", "def update(self, weights, grads):\n learn_rate_t = self.learn_rate\n\n new_weights = []\n for w, g in zip(weights, grads):\n\n # Apply weight decay\n if w.shape[1] > 1:\n # Weight matrix\n g_t = learn_rate_t * (g + self.weight_decay * w)\n else:\n # Bias matrix\n g_t = 
learn_rate_t * g\n w_t = w - g_t\n new_weights.append(w_t)\n return new_weights", "def compute_weights_init(self, size_max=30, learning_rate=0.1):\n \n self.weights = [0] * len(self.models)\n self.last_values = pd.DataFrame()\n \n self.predict[\"weights\"] = self.predict['error'].apply(lambda x: self.compute_weights(x))\n \n self.predict[\"final\"] = self.predict[[*self.models.keys()]].apply(lambda x: self.compute_final_values(x), axis=1)\n \n self.error[\"mae final\"] = self.predict[[\"final\", \"test\"]].apply(lambda x: mae(x), axis=1)\n self.error[\"mape final\"] = self.predict[[\"final\", \"test\"]].apply(lambda x: MAPE(x[0], x[1]), axis=1)", "def update_weight_ratio(self):\n # Ratio for weights from input to hidden layer\n param_scale_ih = np.linalg.norm(self.weights_ih.ravel())\n ih_update = -learning_rate * self.d_wght_ih\n update_scale_ih = np.linalg.norm(ih_update.ravel())\n self.log.info(\"updates:weights for weights_ih: %f\" %\n (update_scale_ih / param_scale_ih))\n # Ratio for weights from hidden layer to hidden layer\n param_scale_hh = np.linalg.norm(self.weights_hh.ravel())\n hh_update = -learning_rate * self.d_wght_hh\n update_scale_hh = np.linalg.norm(hh_update.ravel())\n self.log.info(\"updates:weights for weights_hh: %f\" %\n (update_scale_hh / param_scale_hh))\n # Ratio for weights from hidden layer to output layer\n param_scale_ho = np.linalg.norm(self.weights_ho.ravel())\n ho_update = -learning_rate * self.d_wght_ho\n update_scale_ho = np.linalg.norm(ho_update.ravel())\n self.log.info(\"updates:weights for weights_hh: %f\" %\n (update_scale_ho / param_scale_ho))", "def update_weight(self, x_input: np.ndarray) -> None:\n if self.layers[-1].delta is None:\n raise ValueError(\"No derivative calculated yet\")\n layer_input = x_input.copy()\n if np.ndim(x_input) == 1:\n layer_input = layer_input.reshape(-1, 1)\n for layer in self.layers:\n layer.update_weights(layer_input, self.learning_rate)\n layer_input = layer.output", "def update(self):\n # Update the weight matrix: \n self.W -= self.lr * self.grad_W \n \n # Update the bias matrices:\n self.b -= self.lr * np.array(self.grad_b) \n self.c -= self.lr * np.array(self.grad_c)", "def update_params(self, update_weights, update_bias):\n\n if not self.trainable:\n return\n\n update_weights = np.squeeze(update_weights)\n update_bias = np.squeeze(update_bias)\n\n # some have non-trainable parameters, in addition to the\n # weights and biases\n if len(self.params) == 2:\n self.params = (self.params[0] + update_weights, self.params[1] + update_bias)\n else:\n self.params = (self.params[0] + update_weights, self.params[1] + update_bias) + self.params[2:]\n\n # create updated function\n self.f = self.gen_f(self.params, self.output_shape)", "def update_network(self, a, batch_size):\n for layer in self.layers:\n layer.weights_update(a, self.alpha, self.l2_lambda, batch_size)\n a = layer.a", "def train(self, _inputs, label, learning_rate):\n\n current_guess = self.predict(_inputs)\n err = label - current_guess\n\n for i in range(self.input_size):\n self.weights[i] += err*_inputs[i]*learning_rate", "def learn(self, input, trueOutput):\n try:\n prediction= self.feed(input)\n diff = trueOutput - prediction\n inputArray = [1]\n for i in input:\n inputArray.append(i)\n temp = np.array(inputArray)\n temp=np.transpose(temp)\n for i in range(len(self.weights)):\n self.weights[i]+=self.lr*diff*temp[i]\n except ValueError:\n print(\"One of the Inputs is not a number\")\n raise ValueError", "def test_net_weight_update(self):\n nn = NeuralNet(0, 0, 
'', '', blank=True)\n nn.create_net(2, 1, 2, 2)\n nn.eta = 0.1\n\n # Override weights to static value for reproducibility\n for node in nn.layers[2].nodes:\n node.weights = [0.6, 0.6]\n\n for node in nn.layers[3].nodes:\n node.weights = [1.0, 1.0]\n\n nn.update_weights([2, 3], [0], test=True)\n\n test_weight = nn.layers[-1].nodes[0].weights[0]\n self.assertEqual(round(test_weight, 4), 0.9901)", "def _update_weight(self, time):\r\n # Until the relative time window, return original weights.\r\n if time < self.window - 1:\r\n return self.weights\r\n\r\n # Set the current predicted relatives value.\r\n current_prediction = self._calculate_predicted_relatives(time)\r\n\r\n # Set the deviation from the mean of current prediction.\r\n predicted_deviation = current_prediction - np.ones(self.number_of_assets) * np.mean(\r\n current_prediction)\r\n\r\n # Calculate alpha, the lagrangian multiplier.\r\n norm2 = np.linalg.norm(predicted_deviation, ord=1) ** 2\r\n\r\n # If norm2 is zero, return previous weights.\r\n if norm2 == 0:\r\n return self.weights\r\n alpha = np.minimum(0, (current_prediction * self.weights - self.epsilon) / norm2)\r\n\r\n # Update new weights.\r\n new_weights = self.weights - alpha * predicted_deviation\r\n\r\n # Project to simplex domain.\r\n new_weights = self._simplex_projection(new_weights)\r\n\r\n return new_weights", "def update_weights(self, X: np.ndarray, y: np.ndarray, learning_rate: float, reg_coeff: float):\n ################################################################################\n # TODO: Compute the gradient of loss computed above w.r.t the svm weights. #\n # and then update self.w with the computed gradient. #\n # (don't forget learning rate and reg_coeff in update rule) #\n # Don't forget L2-regularization term in your implementation! 
#\n ################################################################################\n\n # write your code here\n N = len(X)\n gradient = ((-1 * np.sum(((X.T * y).T)[np.where((1 - np.matmul(X, self.weights) * y) > 0)], axis=0)) / N) + reg_coeff * self.weights\n self.weights -= learning_rate * gradient\n\n\n ################################################################################\n # END OF YOUR CODE #\n ################################################################################", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n diff = reward + self.discount * self.computeValueFromQValues(nextState) - self.getQValue(state, action)\n for feature_name, feature_value in self.featExtractor.getFeatures(state, action).iteritems():\n self.weights[feature_name] += self.alpha * diff * feature_value", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n diff = reward + self.discount * self.computeValueFromQValues(nextState) - self.getQValue(state, action)\n for feature_name, feature_value in self.featExtractor.getFeatures(state, action).iteritems():\n self.weights[feature_name] += self.alpha * diff * feature_value", "def transfer_weights(self):\n W, target_W = self.model.get_weights(), self.target_model.get_weights()\n for i in range(len(W)):\n target_W[i] = self.tau * W[i] + (1 - self.tau)* target_W[i]\n self.target_model.set_weights(target_W)", "def on_epoch_end(self, state: _State):\n if self.decouple_weight_decay:\n optimizer = self._optimizer\n for i, wd in enumerate(self._optimizer_wd):\n optimizer.param_groups[i][\"weight_decay\"] = wd", "def store_iter_weights(self):\n self.w_after_iter.append(self.nn.get_param_values())", "def _update(self, x: np.ndarray, y: int):\n decision = self.weights.dot(x)\n v_t = x @ np.diag(np.diag(self._sigma)) @ x.T\n m_t = y * decision\n loss = (self._phi * math.sqrt(v_t) - m_t)\n #print(loss)\n if loss > 0:\n # We scale our learning rate (alpha) using the weight/cost\n alpha_t = self.class_weight_[y] * self._get_alpha(m_t, v_t)\n u_t = 0.25 * (-alpha_t * v_t * self._phi + math.sqrt(\n alpha_t ** 2 * v_t ** 2 * self._phi ** 2 + 4 * v_t)) ** 2\n beta_t = alpha_t * self._phi / (math.sqrt(u_t) +\n alpha_t * self._phi * v_t)\n sigma = np.expand_dims(x @ self._sigma, axis=0)\n self.weights += alpha_t * y * np.squeeze(sigma)\n self._sigma -= beta_t * sigma.T @ sigma", "def setup_train(self, input_data, target):\n \n W_my = self.setup_print(self.W, \"intial W\")\n \n # The weights with the random adjustment are <batch_size, from, to>, so\n # we inflate W here, too.\n W_exp = tf.tile(tf.expand_dims(W_my, 0), [self.config['batch_size'], 1, 1]) # <batch_size, from, to>\n\n # 1. Actual output\n output = self.setup_forward(W_exp, input_data, prefix=\"org\") # <batch_size, (timesteps,) output>\n loss = self.setup_loss(output, target, prefix=\"org\") # <batch_size>\n loss = self.setup_print(loss, \"loss\")\n \n # 2. 
Test output in the environment\n # TODO Do the random test around the decayed weights\n # NOTE: W_adj_source keeps its value inside a single run\n # https://stackoverflow.com/questions/52213325/are-tensorflow-random-values-guaranteed-to-be-the-same-inside-a-single-run\n W_adj = self.W_adj_source # <batch_size, from, to>\n W_adj = self.setup_print(W_adj, \"W_adj\")\n \n output_adj = self.setup_forward(W_exp + W_adj, input_data, prefix=\"adj\")\n loss_adj = self.setup_loss(output_adj, target, prefix=\"adj\")\n loss_adj = self.setup_print(loss_adj, \"loss_adj\")\n # improvement is positive when we go from large error to small error\n improvement = loss - loss_adj # <batch_size>\n improvement = self.setup_print(improvement, \"improvement\")\n \n # Update the weights\n improvement = tf.expand_dims(tf.expand_dims(improvement, 1), 2) # <batch_size, 1, 1>\n weight_update = W_adj * improvement # <batch_size, from, to>\n weight_update = self.setup_print(weight_update, \"weight_update\")\n weight_update = tf.reduce_mean(weight_update, axis=0) # <from, to>\n \n weight_update = self.setup_print(weight_update, \"weight_update_reduced\")\n weight_update = self.W.assign_add(weight_update)\n \n # Get the average loss\n loss_avg = tf.reduce_mean(loss, axis=0)\n \n return weight_update, loss_avg", "def update(self, state, action, nextState, reward):\n # print \"Update\"\n difference = (reward + self.discount*self.compValFromState(nextState)) - self.getQValue(state, action)\n features = self.featExtractor.getFeatures(state, self.index)\n #print \"features\", features, \"difference\", difference, \"weights\", self.weights\n for key in self.weights:\n self.weights[key] = self.alpha * difference * features[key]", "def fit(self, input, output):\n last = self.w0 # equivalent to wk in the loop\n for iteration in range(self.descents):\n sum_over_i = [0.0] * len(last)\n for i in range(len(input)):\n wtx = np.dot(np.transpose(last), input[i, :])\n sum_over_i = np.add(sum_over_i, input[i, :] * (output[i] - self.sigmoid(wtx)))\n last = np.add(last, self.learning_rate * sum_over_i)\n self.final_weights = last", "def update_learning_rate(self, n_batches):\r\n criterion1 = n_batches < self.C.lr_ramp_up_minibatches\r\n criterion2 = n_batches % (self.C.lrdi + self.C.lr_ramp_up_minibatches * self.C.ramp_up_lr) == 0\r\n\r\n if self.C.ramp_up_lr and criterion1:\r\n # calculate what the \"maximum\" learning rate should be given the\r\n # input params, and ramp up the learning rate\r\n max_lr = self.C.max_rel_lr * self.C.init_lr\r\n lr_ramp_up_factor = np.exp(np.log(max_lr / self.C.init_lr) / self.C.lr_ramp_up_minibatches)\r\n\r\n # learning rate will increase if not `maximum_lr` already\r\n util.update_lr(optimizer=self.optimizer,\r\n scale_factor=lr_ramp_up_factor,\r\n maximum_lr=max_lr)\r\n\r\n elif criterion2:\r\n # decreate the learning rate\r\n min_lr = self.C.min_rel_lr * self.C.init_lr\r\n util.update_lr(optimizer=self.optimizer,\r\n scale_factor=self.C.lrdf**n_batches,\r\n minimum_lr=min_lr)", "def optimize(layer: Layer, learning_rate: float) -> None:\n layer._weights -= learning_rate * layer._dw\n layer._bias -= learning_rate * layer._db", "def update(self,inputs,ifrestart):\n self.lenth=int(np.size(inputs)/self.n_inputs)\n inputs=np.reshape(inputs, (self.n_inputs,self.lenth)) \n\n if ifrestart:\n self.state=np.zeros((self.n_reservoir,self.lenth))\n self.state[:,0]=np.dot(self.V,inputs[:,0].T)\n\n else:\n self.state=np.hstack(\n (self.laststate,\n np.zeros((self.n_reservoir,self.lenth)))\n ) \n\n 
inputs=np.hstack((self.lastinput,inputs))\n\n self.lenth+=1 \n\n for i in range(1,self.lenth):\n self.state[:,i]=(\n np.dot(self.W.T, self.state[:,i-1])\n + self.a * np.dot(self.V, inputs[:,i].T) \n + self.Wb.T\n + self.noiseVec #TODO:\n #+ self.noise * np.random.normal(size=(self.n_reservoir,1)).T \n ) \n\n self.laststate=self.state[:,-1]\n self.laststate=np.reshape(self.laststate, (len(self.laststate),-1))\n \n self.lastinput=inputs[:,-1]\n self.lastinput=np.reshape(self.lastinput, (len(self.lastinput),-1))\n self.bias=np.ones((1,self.lenth))\n self.allstate=np.vstack((self.bias,self.state))", "def step_update(self, num_updates):\n if self.args['optimization']['warmup_updates'] > 0 and \\\n num_updates <= self.args['optimization']['warmup_updates']:\n self.warmup_factor = num_updates / float(self.args['optimization']['warmup_updates'])\n lr = self.warmup_factor * self.lr\n elif num_updates >= self.total_num_update:\n lr = self.end_learning_rate\n else:\n warmup = self.args['optimization']['warmup_updates']\n lr_range = self.lr - self.end_learning_rate\n pct_remaining = 1 - (num_updates - warmup) / (self.total_num_update - warmup)\n lr = lr_range * pct_remaining ** (self.power) + self.end_learning_rate\n self.optimizer.set_lr(lr)\n return self.optimizer.get_lr()", "def update(self, x, win, t, max_iteration):\n eta = self._decay_function(self._learning_rate, t, max_iteration)\n # sigma and learning rate decrease with the same rule\n sig = self._decay_function(self._sigma, t, max_iteration)\n # improves the performances\n g = self.neighborhood(win, sig)*eta\n # w_new = eta * neighborhood_function * (x-w)\n self._weights += einsum('ij, ijk->ijk', g, x-self._weights)", "def update(self, X, y):\n proba = self.predict_proba(X)\n top_loss = proba - y\n bias_gradient = np.sum(top_loss)\n weight_gradient = (top_loss).T.dot(X)\n\n # the gradient update\n self.b = self.b - self.lrate * bias_gradient\n self.W = self.W - self.lrate * weight_gradient", "def updateWeights(self, LR, M):\n assert self._batch\n for i,(dws,odws) in enumerate(zip(self._wDeltas, self._oldWDeltas)):\n ws = self._layer.weightsAt(i)\n for j, (dw, odw) in enumerate(zip(dws, odws)):\n dw = LR*dw + M*odw\n ws[j] += dw\n odws[j] = dw\n dws[j] = 0.0", "def update_weights(self, alpha, ind):\n inside = -alpha * self.labels * self.predictions[ind, :]\n new_weights = self.weights * np.exp(inside)\n self.weights = new_weights / np.sum(new_weights)", "def update_model_parameters(parameters, grads, learning_rate):\n L = len(parameters) /2 # number of layers in the neural network\n\n for l in range(int(L)):\n parameters[\"W\" + str(l + 1)] = parameters[\"W\" + str(l + 1)] - learning_rate * grads[\"dW\" + str(l + 1)]\n parameters[\"b\" + str(l + 1)] = parameters[\"b\" + str(l + 1)] - learning_rate * grads[\"db\" + str(l + 1)]\n return parameters\n # raise NotImplementedError", "def _update_initial_learning_rate(configs, learning_rate):\n\n optimizer_type = get_optimizer_type(configs[\"train_config\"])\n if optimizer_type == \"rms_prop_optimizer\":\n optimizer_config = configs[\"train_config\"].optimizer.rms_prop_optimizer\n elif optimizer_type == \"momentum_optimizer\":\n optimizer_config = configs[\"train_config\"].optimizer.momentum_optimizer\n elif optimizer_type == \"adam_optimizer\":\n optimizer_config = configs[\"train_config\"].optimizer.adam_optimizer\n else:\n raise TypeError(\"Optimizer %s is not supported.\" % optimizer_type)\n\n learning_rate_type = get_learning_rate_type(optimizer_config)\n if learning_rate_type == 
\"constant_learning_rate\":\n constant_lr = optimizer_config.learning_rate.constant_learning_rate\n constant_lr.learning_rate = learning_rate\n elif learning_rate_type == \"exponential_decay_learning_rate\":\n exponential_lr = (\n optimizer_config.learning_rate.exponential_decay_learning_rate)\n exponential_lr.initial_learning_rate = learning_rate\n elif learning_rate_type == \"manual_step_learning_rate\":\n manual_lr = optimizer_config.learning_rate.manual_step_learning_rate\n original_learning_rate = manual_lr.initial_learning_rate\n learning_rate_scaling = float(learning_rate) / original_learning_rate\n manual_lr.initial_learning_rate = learning_rate\n for schedule in manual_lr.schedule:\n schedule.learning_rate *= learning_rate_scaling\n elif learning_rate_type == \"cosine_decay_learning_rate\":\n cosine_lr = optimizer_config.learning_rate.cosine_decay_learning_rate\n learning_rate_base = cosine_lr.learning_rate_base\n warmup_learning_rate = cosine_lr.warmup_learning_rate\n warmup_scale_factor = warmup_learning_rate / learning_rate_base\n cosine_lr.learning_rate_base = learning_rate\n cosine_lr.warmup_learning_rate = warmup_scale_factor * learning_rate\n else:\n raise TypeError(\"Learning rate %s is not supported.\" % learning_rate_type)", "def _update_params(self, gradients: dict, learning_rate: float):\n L = len(self.activations)\n\n for l in range(L):\n self.params[\"W_\" + str(l + 1)] = self.params[\"W_\" + str(l + 1)] - learning_rate * gradients[\n \"dW\" + str(l + 1)]\n\n self.params[\"b_\" + str(l + 1)] = self.params[\"b_\" + str(l + 1)] - learning_rate * gradients[\n \"db\" + str(l + 1)]", "def perform_update(self, gradient):\n w = sys.modules[self.shared_mem_name].__dict__[\"w\"]\n w -= self.learning_rate * gradient", "def update_weight(y, unsupervised_weight, next_weight):\n y[:, -1] = next_weight\n unsupervised_weight[:] = next_weight\n\n return y, unsupervised_weight", "def update_weight():\n\twts = request.json['featureWeights']\n\n\t# Intialize new model with the latest weights\n\tglobal model\n\tmodel = tscore.ScoreModel(wts)\n\treturn jsonify( { 'updated': \"True\", 'featureWeights': wts } ), 201", "def adjust_learning_rate(self):\n out_base_lr = self.args.base_lr\n for param_group in self.optimizer.param_groups:\n in_lr = param_group[\"initial_lr\"]\n out_lr = in_lr\n if self.args.lr_decay_type == \"cos\": # cosine lr schedule\n out_lr *= 0.5 * (1.0 + np.cos(np.pi * self.epoch / self.args.epochs))\n else: # stepwise lr schedule\n for milestone in self.args.lr_step_schedule:\n out_lr *= 0.1 if self.epoch >= milestone else 1.0\n param_group[\"lr\"] = out_lr\n if in_lr == self.args.base_lr:\n out_base_lr = out_lr\n if self.train_logger is not None:\n self.train_logger.scalar_summary(\n \"metrics/%s/epoch\" % self.full_name, self.epoch, step=self.iteration, increment_counter=False\n )\n self.train_logger.scalar_summary(\n \"metrics/%s/lr\" % self.full_name, out_base_lr, step=self.iteration, increment_counter=False\n )\n print(\"Epoch\", self.epoch, \"Learning rate\", out_base_lr)\n return out_base_lr", "def step_update(self, num_updates):\n if num_updates < self.cfg.warmup_updates:\n self.lr = self.warmup_init_lr + num_updates * self.warmup_lr_step\n else:\n curr_updates = num_updates - self.cfg.warmup_updates\n lr_mult = self.lr_decay ** (curr_updates // self.lr_deacy_period)\n self.lr = max(self.max_lr * lr_mult, self.min_lr)\n\n self.optimizer.set_lr(self.lr)\n return self.lr", "def weights_decay(self):\n for param_group in self.optimizer.param_groups:\n for param in 
param_group['params']:\n param.data = param.data.add(-1.*self.weights_decay * param_group['lr'], param.data)", "def assign_learning_rate(session, lr_update, lr_placeholder, new_lr):\n session.run(lr_update, feed_dict={lr_placeholder: new_lr})", "def update_parameters(parameters, grads, learning_rate):\n pass", "def Train(self, input_data: list, target_output_data: list, learning_rate: float) -> float:\n ComputeGradients(input_data, target_output_data)\n self.hidden_layer_biases -= learning_rate * self.hidden_biases_gradient\n self.output_layer_biases -= learning_rate * self.output_biases_gradient\n self.input_to_hidden_weights -= learning_rate * self.input_to_hidden_weights_gradient\n self.hidden_to_output_weights -= learning_rate * self.hidden_to_output_weights_gradient", "def doWeights(self, index, o, odelta, weights, oldDeltas, LR, M):\n\n delta = LR*o*odelta + M*oldDeltas[index]\n weights[index] += delta\n oldDeltas[index] = delta", "def update(self, state, action, nextState, reward):\n candidateQ = reward + self.discount * \\\n self.computeValueFromQValues(nextState)\n currentQ = self.getQValue(state, action)\n difference = candidateQ - currentQ\n features = self.featExtractor.getFeatures(state, action)\n for feat in features:\n self.weights[feat] += self.alpha * difference * features[feat]", "def update_learning_rate(self, validation_loss=None):\n if validation_loss is None:\n for scheduler in self.schedulers:\n scheduler.step()\n else:\n for scheduler in self.schedulers:\n scheduler.step(validation_loss)\n self.lr = self.optimizers[0].param_groups[0]['lr']\n print('learning rate = {0:.7f}'.format(self.lr))", "def update_weights(self, weight_delta):\n\n self._weights = math_util.vector_sum(self._weights, weight_delta)", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n feature_dictionary = self.featExtractor.getFeatures(state, action)\n difference = (reward + self.discount * self.computeValueFromQValues(nextState)) - self.getQValue(state, action)\n\n for feature in feature_dictionary:\n self.weights[feature] += self.alpha * difference * feature_dictionary[feature]\n\n #if self.epsilon > self.epsilon_min:\n # self.epsilon *= self.epsilon_decay", "def test_weights_update(self):\n # Reset models.\n self.model.load_state_dict(self.initial_model_dict)\n self.actor_model.load_state_dict(self.initial_actor_model_dict)\n\n polybeast.learn(*self.learn_args)\n\n model_state_dict = self.model.state_dict(keep_vars=True)\n actor_model_state_dict = self.actor_model.state_dict(keep_vars=True)\n for key, initial_tensor in self.initial_model_dict.items():\n model_tensor = model_state_dict[key]\n actor_model_tensor = actor_model_state_dict[key]\n # Assert that the gradient is not zero for the learner.\n self.assertGreater(torch.norm(model_tensor.grad), 0.0)\n # Assert actor has no gradient.\n # Note that even though actor model tensors have no gradient,\n # they have requires_grad == True. 
No gradients are ever calculated\n # for these tensors because the inference function in polybeast.py\n # (that performs forward passes with the actor_model) uses torch.no_grad\n # context manager.\n self.assertIsNone(actor_model_tensor.grad)\n # Assert that the weights are updated in the expected way.\n # We manually perform a gradient descent step,\n # and check that they are the same as the calculated ones\n # (ignoring floating point errors).\n expected_tensor = (\n initial_tensor.detach().numpy() - self.lr * model_tensor.grad.numpy()\n )\n np.testing.assert_almost_equal(\n model_tensor.detach().numpy(), expected_tensor\n )\n np.testing.assert_almost_equal(\n actor_model_tensor.detach().numpy(), expected_tensor\n )", "def update(self, state, action, nextState, reward):\n \"\"\"Description:\n Use second equation in slide 71 of MDP\n Adjest weight of active features depend on tranistion \n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n feat = self.featExtractor.getFeatures(state, action)\n\n # if weight is empty, then weight will need to initial to 1 for all features\n # According to which Extractor user choose, weight counter will have equal number of keys.\n if len(self.weight) == 0:\n feat = self.featExtractor.getFeatures(state, action)\n self.weight.incrementAll(feat.keys(), 1)\n \n maxQns = self.getValue(nextState)\n if maxQns == None:\n maxQns = 0\n Qsa = self.getQValue(state, action)\n difference = ( reward + self.discountRate * maxQns ) - Qsa\n \n for key in self.weight.keys():\n self.weight[key] += (self.alpha * difference * feat[key])\n \n \n \"\"\" END CODE \"\"\"", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n feature_dictionary = self.featExtractor.getFeatures(state, action)\n difference = (reward + self.discount * self.computeValueFromQValues(nextState)) - self.getQValue(state, action)\n\n for feature in feature_dictionary:\n self.weights[feature] += self.alpha * difference * feature_dictionary[feature]", "def update(self,inputs,ifrestart=1):\n self.lenth=int(np.size(inputs)/self.n_inputs)\n inputs=np.reshape(inputs, (self.n_inputs,self.lenth)) \n\n if ifrestart:\n self.state=np.zeros((self.n_reservoir,self.lenth))\n self.state[:,0]=np.dot(self.V,inputs[:,0].T)\n\n else:\n self.state=np.hstack(\n (self.laststate,\n np.zeros((self.n_reservoir,self.lenth)))\n ) \n\n inputs=np.hstack((self.lastinput,inputs))\n\n self.lenth+=1 \n\n for i in range(1, self.lenth):\n self.state[:,i]=(\n self.alpha * self.state[:,i]\n +\n (1 - self.alpha) * np.tanh(\n np.dot(self.W.T, self.state[:,i-1])\n + self.a * np.dot(self.V, inputs[:,i].T) \n + self.Wb.T\n ) \n + self.noiseVec #TODO:\n #+ self.noise * np.random.normal(size=(self.n_reservoir,1)).T \n ) \n\n self.laststate=self.state[:,-1]\n self.laststate=np.reshape(self.laststate, (len(self.laststate),-1))\n self.lastinput=inputs[:,-1]\n self.lastinput=np.reshape(self.lastinput, (len(self.lastinput),-1))\n self.bias=np.ones((1,self.lenth))\n self.allstate=np.vstack((self.bias,self.state))", "def adjust_learning_rate(self, optimizer, epoch, initial_lr, writer=None):\n lr = initial_lr * (0.98 ** epoch)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n if writer:\n writer.add_scalar(\"lr_G\", lr, epoch + 1)", "def update_weights(architecture,grad_weights,grad_bias,m,v,t,lr,optimizer=\"adam\"):\n \n for layer in range(len(architecture)):\n if not (grad_weights['layer{}'.format(layer+1)] is None) and grad_bias['layer{}'.format(layer+1)] is not None:\n grad_weightsi = 
grad_weights['layer{}'.format(layer+1)]\n grad_weightsi /= bs\n grad_biasi = grad_bias['layer{}'.format(layer+1)]\n grad_biasi /= bs\n\n \n if optimizer.lower()==\"sgd\":\n # Mini-Batch SGD\n qw = lr*grad_weightsi\n qb = lr*grad_biasi\n else:\n # Mini-Batch Adam\n mw,mb = m['layer{}'.format(layer+1)]\n vw,vb = v['layer{}'.format(layer+1)]\n qw,mw,vw = adam(grad_weightsi,beta_1,beta_2,mw,vw,t,lr) # Have obtained dw\n qb,mb,vb = adam(grad_biasi,beta_1,beta_2,mb,vb,t,lr) # Have obtained db\n\n architecture['layer{}'.format(layer+1)][2].requires_grad = False\n architecture['layer{}'.format(layer+1)][3].requires_grad = False\n # Updating weights and biases now\n try:\n architecture['layer{}'.format(layer+1)][2] -= torch.Tensor(qw)\n except:\n architecture['layer{}'.format(layer+1)][2] -= torch.t(torch.Tensor(qw))\n try:\n architecture['layer{}'.format(layer+1)][3] -= torch.Tensor(qb)\n except:\n architecture['layer{}'.format(layer+1)][3] -= torch.t(torch.Tensor(qb))\n\n m['layer{}'.format(layer+1)][0] = torch.Tensor(mw)\n m['layer{}'.format(layer+1)][1] = torch.Tensor(mb)\n v['layer{}'.format(layer+1)][0] = torch.Tensor(vw)\n v['layer{}'.format(layer+1)][1] = torch.Tensor(vb)\n grad_weights['layer{}'.format(layer+1)] = torch.zeros(grad_weightsi.shape)\n grad_bias['layer{}'.format(layer+1)] = torch.zeros(grad_biasi.shape)\n return grad_weights,grad_bias,m,v", "def back_propagate(self, inputs, hidden, output, errors):\n d_output = self._da(output) * errors\n d_hidden = self._da(hidden) * dot(d_output, self.W_output[:-1].T)\n\n n_samples = inputs.shape[0]\n bias = ones((n_samples, 1))\n # Update momentum and weights\n self.V_output = self.output_learning_rate * dot(c_[hidden, bias].T, d_output) / n_samples\n self.W_output+= self.V_output\n\n self.V_hidden = self.hidden_learning_rate * dot(c_[inputs, bias].T, d_hidden) / n_samples\n self.W_hidden+= self.V_hidden", "def _step(self):\n # Make a minibatch of training data\n num_train = self.X_train.shape[0]\n # random choose the samples\n batch_mask = np.random.choice(num_train, self.batch_size)\n X_batch = self.X_train[batch_mask]\n y_batch = self.y_train[batch_mask]\n\n # Compute loss and gradient\n loss, grads = self.model.loss(X_batch, y_batch)\n self.loss_history.append(loss)\n\n # Perform a parameter update\n for p, w in self.model.params.items():\n dw = grads[p]\n config = self.optim_configs[p]\n next_w, next_config = self.update_rule(w, dw, config)\n self.model.params[p] = next_w\n self.optim_configs[p] = next_config" ]
[ "0.80830616", "0.7857255", "0.77221936", "0.75878155", "0.7544157", "0.75073385", "0.7503516", "0.74666196", "0.7415437", "0.7407029", "0.7364739", "0.7336763", "0.7227047", "0.721761", "0.7210267", "0.71573895", "0.7152569", "0.7145055", "0.7145055", "0.7090362", "0.7078788", "0.7007819", "0.69958127", "0.69703805", "0.69543064", "0.69366837", "0.68969536", "0.6893947", "0.6885137", "0.6882394", "0.6873967", "0.68690956", "0.68609035", "0.68609035", "0.68421334", "0.6839933", "0.68374974", "0.6837333", "0.6830847", "0.68301916", "0.6819015", "0.6816679", "0.67913175", "0.6790177", "0.67215157", "0.6716838", "0.66944534", "0.66884613", "0.6674114", "0.66707504", "0.66595995", "0.66419077", "0.66263", "0.6614306", "0.6598464", "0.65973705", "0.65742606", "0.6565358", "0.654371", "0.654371", "0.6539628", "0.65392566", "0.6532652", "0.6529181", "0.65175205", "0.6513337", "0.65096945", "0.649863", "0.64939404", "0.6486433", "0.64848214", "0.6484577", "0.64836085", "0.64833146", "0.6477442", "0.64634717", "0.646091", "0.64608985", "0.6454962", "0.6448831", "0.64420485", "0.644203", "0.6428474", "0.642389", "0.64200073", "0.641952", "0.64194226", "0.6416538", "0.6411317", "0.64031506", "0.64014935", "0.6399576", "0.639831", "0.63960433", "0.63726515", "0.6366976", "0.6361816", "0.6360621", "0.6359254", "0.6358597" ]
0.7508802
5
console print, with color
def print_color(line, color=Colors.DEFAULT):
    sys.stdout.write("{}{}{}".format(color.value, line, Colors.DEFAULT.value))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def printcolor(color, text):\r\n pushcolor()\r\n setcolor(color)\r\n print text\r\n popcolor()", "def color_print(txt, foreground=PALETTE['white'], background=PALETTE['black']):\n print(color_text(txt, foreground, background))", "def color_print(message, color, newline='\\n'):\n sys.stderr.write('%s%s%s%s' % (color, message, ANSI_NORMAL, newline))", "def printColorizedInWindows(text, color):\n std_out_handle = ctypes.windll.kernel32.GetStdHandle(-11)\n for i in range(0, len(color)):\n ctypes.windll.kernel32.SetConsoleTextAttribute(std_out_handle, color[i])\n sys.stdout.write(text)\n # cor padrão é 7, white\n ctypes.windll.kernel32.SetConsoleTextAttribute(std_out_handle, 7)", "def print_out(self):\n for line in self.canvas:\n for char_style in line:\n colors.print_style_char(char_style)\n sys.stdout.write('\\n')", "def print_with_color(message, color):\n if color in colors:\n print(colors[color] + message + '\\x1b[0m')\n else:\n print(message)", "def printc(text, color='black', style='normal', **kwargs):\n\n print(strc(text, color, style), **kwargs)\n sys.stdout.flush()", "def printRed(text):\n print(Fore.RED + text + Fore.WHITE)", "def print_log(value_color=\"\", value_noncolor=\"\"):\n HEADER = '\\033[92m'\n ENDC = '\\033[0m'\n print(HEADER + value_color + ENDC + str(value_noncolor))", "def print_with_color(message: str, color: str):\n import sys\n print(color + message + constant.Color.ENDC, file=sys.stderr)", "def print_colored(word):\n for char in word:\n print(c.rc() + char + c.x, end='')", "def status(s):\n print(\"\\033 {}\".format(s))#print(\"\\033[1m{0}\\033[0m\".format(s))", "def echo(self, *args, **kwargs):\n text = \" \".join([str(item) for item in args])\n color = kwargs.get(\"color\", 32)\n sys.stdout.write(\"\\033[0;%dm%s\\033[0;m\" % (color, text))", "def prBlueBG(text):\n print(\"\\033[44m{}\\033[0m\".format(text), sep=\"\")", "def pr(*args, c=None, sep=' ', end='\\n'):\n\n msg = \"\"\n cnt = 0\n for i in args:\n cnt += 1\n if c is None:\n msg += str(i) + sep\n else:\n color = get_color_from_str(c, cnt)\n if color == 'w':\n msg += WHITE.format(i) + sep\n elif color == 'r':\n msg += RED.format(i) + sep\n elif color == 'g':\n msg += GREEN.format(i) + sep\n elif color == 'y':\n msg += YELLOW.format(i) + sep\n elif color == 'b':\n msg += BLUE.format(i) + sep\n elif color == 'm':\n msg += MAGENTA.format(i) + sep\n elif color == 'c':\n msg += CYAN.format(i) + sep\n else:\n msg += str(i) + sep\n msg += end\n print(msg, sep='', end='')", "def print_green(msg: str = None) -> None:\n if msg is None:\n raise Exception(\"msg was not defined\")\n\n print(Fore.GREEN + msg)\n print(Style.RESET_ALL + \"\", end=\"\")", "def print_color(text: str, color:bcolors = bcolors.BOLD):\n print(f\"{color}{text}{bcolors.ENDC}\")", "def _print(self, message, level, color):\n if (self.level >= level):\n sys.stdout.write(color)\n try: sys.stdout.write(\"%s\\n\" % message)\n except: sys.stdout.write(encode(\"%s\\n\" % message))\n sys.stdout.write(COLOR_RESET)\n sys.stdout.flush()\n return message", "def writec(text, color='black', style='normal'):\n\n sys.stdout.write(strc(text, color, style))", "def _print(txt):\n\n # Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.\n # Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.\n # Style: DIM, NORMAL, BRIGHT, RESET_ALL\n print('{0}{1}'.format(Style.BRIGHT + txt, Fore.RESET + Back.RESET + Style.RESET_ALL))", "def print_msg(*msg):\n colour_format = '0;36'\n print('\\x1b[%sm%s\\x1b[0m' % (colour_format, \" \".join([m if 
isinstance(m, str) else str(m) for m in msg])))", "def print(self):\n currentFgColor = CLEAR\n currentBgColor = CLEAR\n print(Fore.RESET + Back.RESET, end='')\n\n for y in range(self._height):\n for x in range(self._width):\n # Set colorama fg color if the fg color has changed.\n fg = self._fginfo[x][y]\n if fg is None:\n fg = CLEAR\n if fg != currentFgColor:\n print(COLORAMA_FG_MAP[fg], end='')\n currentFgColor = fg\n\n # Set colorama bg color if the bg color has changed.\n bg = self._bginfo[x][y]\n if bg is None:\n bg = CLEAR\n if bg != currentBgColor:\n print(COLORAMA_BG_MAP[bg], end='')\n currentBgColor = bg\n\n # Display the character.\n c = self._chars[x][y]\n if c is None:\n c = ' '\n print(c, end='') # TODO - see if there's ways to optimize this.\n print()\n print(Fore.RESET + Back.RESET, end='')", "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))", "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))", "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))", "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))", "def cprint(\r\n text,\r\n color=None,\r\n background=None,\r\n bold=False,\r\n italic=False,\r\n underline=False,\r\n end=\"\\n\",\r\n):\r\n\r\n _reset = Color.RESET\r\n bg = \"\" if background is None else _set_background(background)\r\n font_color = \"\" if color is None else _set_color(color)\r\n font_style = _set_font_style(bold=bold, italic=italic, underline=underline)\r\n output_style = bg + font_color + font_style\r\n sys.stdout.write(output_style)\r\n sys.stdout.write(text)\r\n sys.stdout.write(_reset + end)", "def redtext(mesg):\n if sys.platform == 'win32':\n import win32console\n handle = win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE)\n reset = handle.GetConsoleScreenBufferInfo()['Attributes']\n handle.SetConsoleTextAttribute(12)\n sys.stdout.writelines(mesg+'\\n')\n handle.SetConsoleTextAttribute(reset)\n else:\n sys.stdout.write('\\033[91m'+mesg+'\\033[0m\\n')", "def print_red(msg: str = None) -> None:\n if msg is None:\n raise Exception(\"msg was not defined\")\n\n print(Fore.RED + msg)\n print(Style.RESET_ALL + \"\", end=\"\")", "def color(color):\n if sys.platform == \"win32\":\n if color == \"green\":\n set_text_attr(FOREGROUND_GREEN | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"yellow\":\n set_text_attr(FOREGROUND_YELLOW | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"red\":\n set_text_attr(FOREGROUND_RED | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"blue\":\n set_text_attr(FOREGROUND_BLUE | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"reset\":\n set_text_attr(FOREGROUND_GREY | get_text_attr() & 0x0070)\n else :\n if color == \"green\":\n sys.stdout.write('\\033[92m')\n elif color == \"red\":\n sys.stdout.write('\\033[91m')\n elif color == \"blue\":\n sys.stdout.write('\\033[94m')\n elif color == \"reset\":\n sys.stdout.write('\\033[0m')", "def status(s: str):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def color_string(self, data, type):\n\n # http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python\n\n if self.options['no_color']:\n return data\n\n CEND = '\\x1b[0m'\n\n if type == ColorType.ok:\n return '\\x1b[1;32;40m{0}{1}'.format(data, CEND)\n if type == 
ColorType.error:\n return '\\x1b[1;31;40m{0}{1}'.format(data, CEND)\n if type == ColorType.warning:\n return '\\x1b[1;36;40m{0}{1}'.format(data, CEND)\n if type == ColorType.info:\n return '\\x1b[1;34;40m{0}{1}'.format(data, CEND)\n\n return str", "def ansiprint(s, fg=\"\", bg=\"\", i=False):\n esc = \"\\033[{:d}{}m\"\n iv = \"\"\n if i:\n iv = \";1\"\n if fg != \"\":\n fg = esc.format(30 + fg, iv)\n if bg != \"\":\n bg = esc.format(40 + bg, iv)\n print(\"\".join([fg, bg, s, esc.format(0, \"\")]))", "def Print(self, s, color=(229, 153, 153, 255)):\r\n self.screen.blit(self.font.render(s, True, color), (5, self.textLine))\r\n self.textLine += 15", "def showColors(self):\n\t\tcolors = ['white', 'red', 'green', 'orange', 'blue', 'purple', 'cyan', 'lightgrey',\n\t\t\t\t 'darkgrey', 'light red', 'light green', 'yellow', 'light blue', 'purple', 'cyan', 'dark white']\n\t\tmax = curses.COLORS if curses.COLORS <= 16 else 16\n\t\tself.screen.clear()\n\t\tfor c in range(0, max):\n\t\t\tself.wts(c + 2, 1, \"color \" + str(c) + ' : ' + colors[c], c)\n\t\tself.wts(18, 1, \"color 16 : red on white\", 16)\n\t\tself.wts(20, 1, 'Color demo, displaying ' + str(max) + ' colors + 1 special')\n\t\tself.screen.refresh()\n\t\tch = False\n\t\twhile not ch:\n\t\t\tch = self.screen.getch()\n\t\tself.exit('Color demo complete')", "def display(self, color = (190,205,205), add = False):\r\n\t\tpass", "def print_debug(*vargs, **kwargs):\n _do_print_color(*vargs, colorcode = 36, **kwargs)", "def style_output(msg='{}'):\n green_code = '\\033[0;32m'\n return text_color(msg, green_code)", "def write(self, x, y, msg, fg=(255, 255, 255), bg=None):\n self.console.draw_str(x, y, msg, fg, bg)", "def print_colors() -> None:\n color_pivot = [0]\n color_pivot += [e * 6 + 16 for e in range(37)]\n color_pivot.append(256)\n color_pivot_start = color_pivot[:-1]\n color_pivot_end = color_pivot[1:]\n color_table_list = [range(cs, ce) for cs, ce in zip(color_pivot_start, color_pivot_end)]\n\n for color_table in color_table_list:\n text = \"\"\n for color in color_table:\n color_string = str(color)\n padding = \"\".join([\" \" for e in range(3 - len(color_string))])\n text += colorize(f\" {padding}{color_string} \", background_256=color, with_end=False)\n print(text + colorize(\"\", background=DEFAULT))", "def print_info(*vargs, **kwargs):\n _do_print_color(*vargs, colorcode = 32, **kwargs)", "def c_prnt(self, text, color):\n if color == 'pink':\n a = self.pink\n elif color == 'blue':\n a = self.blue\n elif color == 'green':\n a = self.green\n elif color == 'dgrn':\n a = self.dgrn\n elif color == 'yel':\n a = self.yellow\n elif color == 'amber':\n a = self.amber\n else:\n raise Exception('The color you selected is not acceptable')\n print(a + text + self.ENDC)", "def printf(col,col2,*string):\n temp,temp2,tep,x,y,flag,reset=col,col2,'','','',False,'\\033[0m'\n col,col2=col.upper(),col2.upper()\n col_list={'BLACK':30,'RED':31,'GREEN':32,'YELLOW':33,'BLUE':34,'MAGENTA':35,'CYAN':36,'WHITE':37}\n col2_list={'BLACK':30,'RED':31,'GREEN':32,'YELLOW':33,'BLUE':34,'MAGENTA':35,'CYAN':36,'WHITE':37}\n if col.find('BG')!=-1:\n col=col.replace('BG','')\n col=col.replace('-','')\n for i in col_list.keys():\n col_list[i]=col_list[i]+10\n elif col.find('FG')!=-1:\n col=col.replace('FG','')\n col=col.replace('-','')\n if col2.find('BG')!=-1:\n col2=col2.replace('BG','')\n col2=col2.replace('-','')\n for i in col2_list.keys():\n col2_list[i]=col2_list[i]+10\n elif col2.find('FG')!=-1:\n col2=col2.replace('FG','')\n col2=col2.replace('-','')\n for i in 
string:\n tep=tep+''+str(i)\n if col2 in col2_list:\n x='\\033['+str(col2_list[col2])+'m'\n flag=True\n else:\n tep=temp2+''+tep\n if col in col_list:\n y='\\033['+str(col_list[col])+'m'\n flag=True\n else:\n tep=temp+''+tep\n if flag:\n tep=x+y+tep+reset\n sys.stdout.write (tep)\n sys.stdout.flush()", "def charcolor(message):\n try:\n print(c.clear)\n while True:\n print_colored(c.clear + c.multi + \"Hello\" + \" \" + who + \"!\")\n except KeyboardInterrupt:\n exit()", "def color_print(*args, **kwargs):\n file = kwargs.get('file', sys.stdout)\n\n end = kwargs.get('end', '\\n')\n\n write = file.write\n if file.isatty():\n for i in range(0, len(args), 2):\n msg = args[i]\n if i + 1 == len(args):\n color = ''\n else:\n color = args[i + 1]\n\n if color:\n msg = _color_text(msg, color)\n\n # Some file objects support writing unicode sensibly on some Python\n # versions; if this fails try creating a writer using the locale's\n # preferred encoding. If that fails too give up.\n if not PY3 and isinstance(msg, bytes):\n msg = _decode_preferred_encoding(msg)\n\n write = _write_with_fallback(msg, write, file)\n\n write(end)\n else:\n for i in range(0, len(args), 2):\n msg = args[i]\n if not PY3 and isinstance(msg, bytes):\n # Support decoding bytes to unicode on Python 2; use the\n # preferred encoding for the locale (which is *sometimes*\n # sensible)\n msg = _decode_preferred_encoding(msg)\n write(msg)\n write(end)", "def color_sample():\r\n env = dict()\r\n setup_quiet_build(env)\r\n for item in env.iteritems():\r\n print item[0],item[1]", "def eprint(*pargs, **kargs):\n print('\\u001b[31m', end='', file=sys.stderr)\n print(*pargs, file=sys.stderr, **kargs)\n print('\\u001b[0m', end='', file=sys.stderr)", "def in_green(s: str) -> str:\n return f\"\\033[92m{str(s)}\\033[0m\"", "def print_substep(text, style=\"\"):\n console.print(text, style=style)", "def reset_color():\n sys.stdout.write(\"\\033[0m\")", "def print_yellow(msg: str = None) -> None:\n if msg is None:\n raise Exception(\"msg was not defined\")\n\n print(Fore.YELLOW + msg)\n print(Style.RESET_ALL + \"\", end=\"\")", "def b(string):\n return \"\\033[94m{0}\\033[0m\".format(string)", "def print(self):\n self.print_avec_separateur(\" \")", "def color(code):\n return lambda t: \"\\033[{0}{1}\\033[0;m\".format(code, t)", "def bg(r: int, g: int, b: int) -> str:\n return f\"\\033[48;2;{r};{g};{b}m\"", "def _colorstr(self, args):", "def print_cbt(msg: str, color: str = \"\", bright: bool = False, tag: str = \"\", end=\"\\n\"):\n brgt = Style.BRIGHT if bright else \"\"\n\n if not isinstance(tag, str):\n raise pyrado.TypeErr(given=tag, expected_type=str)\n else:\n if tag != \"\":\n tag = f\"[{tag}] \"\n\n color = color.lower()\n if color in [\"\", \"w\", \"white\"]:\n print(brgt + tag + msg + Style.RESET_ALL, end=end)\n elif color in [\"y\", \"yellow\"]:\n print(Fore.YELLOW + brgt + tag + msg + Style.RESET_ALL, end=end)\n elif color in [\"b\", \"blue\"]:\n print(Fore.BLUE + brgt + tag + msg + Style.RESET_ALL, end=end)\n elif color in [\"g\", \"green\"]:\n print(Fore.GREEN + brgt + tag + msg + Style.RESET_ALL, end=end)\n elif color in [\"r\", \"red\"]:\n print(Fore.RED + brgt + tag + msg + Style.RESET_ALL, end=end)\n elif color in [\"c\", \"cyan\"]:\n print(Fore.CYAN + brgt + tag + msg + Style.RESET_ALL, end=end)\n else:\n raise pyrado.ValueErr(given=color, eq_constraint=\"'y', 'b', 'g', 'r', or 'c'\")", "def print_success(text):\n\n print(colorize(text, Colors.SUCCESS))", "def test_get_color(self):\n assert dockerprettyps.get_color(1) == 
\"\\033[94m\"\n assert dockerprettyps.get_color(200) == \"\\033[92m\"", "def color_print(\n *messages,\n default_color=Color.NORMAL,\n sep=' ',\n end='\\n',\n file=stdout,\n flush=False,\n):\n\n string = []\n print_colors = file.isatty()\n if print_colors:\n string.append(str(default_color))\n\n messages_iter = iter(messages)\n # Print first message and deal with 'sep' later\n first = next(messages_iter)\n is_color = isinstance(first, Color)\n if is_color and print_colors or not is_color:\n string.append(str(first))\n\n # Print sep only when message is a string\n for m in messages_iter:\n is_color = isinstance(m, Color)\n if is_color and print_colors:\n string.append(str(m))\n elif not is_color:\n string.append(f'{sep}{m}')\n\n # Back to normal\n if print_colors:\n string.append(str(Color.NORMAL))\n\n print(''.join(string), end=end, flush=flush, file=file)", "def cprint_table():\n colors = [None, 'k', 'r', 'g', 'y', 'b', 'm', 'c', 'w']\n styles = [None, 'b', 'f', 'i', 'u', 'x', 'y', 'r', 'h', 's']\n for style in styles:\n print(style)\n for fg in colors:\n for bg in colors:\n msg = ''\n for x in [fg, bg]:\n if x is None:\n msg += '-'\n else:\n msg += x\n cprint(' ' + msg + ' ', fg=fg, bg=bg, style=style, end='')\n print()\n print()", "def _color(self, args):", "def bg(value: int) -> str:\n return f\"\\033[48;5;{value}m\"", "def clowder_command(cmd):\n\n return colored(cmd, attrs=['bold'])", "def print_ok_blue(message: str):\n print_with_color(message, constant.Color.OKBLUE)", "def print_out():\n pass", "def print_line():\n print('+ - - - - + - - - - +'),", "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))\n time.sleep(2)", "def print(self):\r\n self.print_avec_separateur()", "def show(self):\n import Helpers\n for p in self.parts:\n color = (p[1][0]*255, p[1][1]*255, p[1][2]*255, 0)\n Helpers.show(p[0], color)", "def __output(self,msg,status):\n status = int(status)\n if status:\n print \"%s-----------\\033[1;37;42m%s\\033[0m\" % (format(msg,\"<15\"),\"OK\")\n else:\n print \"%s***********\\033[1;37;41m%s\\033[0m\" % (format(msg,\"<15\"),\"ERROR\")", "def color(self, color=0):\n if color not in [0, 1, 2, 3, 4, 5, 6, 7]:\n raise ValueError('color must be a positive integer less than and 8 or 0')\n else:\n self._write(self.__class__.__ESC + 'r' + chr(color))", "def console(self, msg, color):\r\n if self.__isInit != True:\r\n return", "def draw(self):\n res = ''\n # ANSI code to clear the screen\n #res += chr(27) + \"[2J\"\n for position, value in enumerate(self.board.tttboard):\n if value is None:\n res += str(position)\n #sys.stdout.write(str(position))\n else:\n res += str(value)\n #sys.stdout.write(str(value))\n\n if (position + 1) % 3 != 0:\n res += str('|')\n #sys.stdout.write('|')\n else:\n #print ''\n\n res += str('\\n')\n if position == 2 or position == 5:\n #print '-' * 5\n\n res += '-' * 5\n res += str('\\n')\n return res", "def blueline(self):\n\t\treturn self.ESC+\"34m-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\"+self.ESC+\"0m\\r\\n\"", "def my_print(self):\n if self.__size > 0:\n print(\"\\n\" * self.__position[1], end=\"\")\n for i in range(self.__size):\n print(\" \" * self.__position[0], end=\"\")\n print(\"#\" * self.__size)\n else:\n print()", "def my_print(self):\n if self.__size > 0:\n for k in range(self.__position[1]):\n print()\n for i in range(self.__size):\n for j in range(self.__position[0]):\n print(\" \", end='')\n print(\"#\" * self.__size)\n else:\n print()", "def my_print(self):\n if self.__size is not 0:\n for ite in 
range(self.__position[1]):\n print()\n for ite in range(self.__size):\n print(\" \" * self.__position[0], end=\"\")\n print(\"#\" * self.size)\n else:\n print()", "def print_text_line():\n print_indentation()\n print(STYLES[parameters[\"Style\"]][\"Vertical line\"], end=\"\")\n for _ in range(parameters[\"Surrounding spaces\"]):\n print(parameters[\"Fill char\"], end=\"\")\n print(text, end=\"\")\n for _ in range(parameters[\"Surrounding spaces\"]):\n print(parameters[\"Fill char\"], end=\"\")\n print(STYLES[parameters[\"Style\"]][\"Vertical line\"])", "def printer(msg):\r\n sys.stdout.write(\"\\r\" + msg)\r\n sys.stdout.flush()", "def add_color_emit_ansi(fn):\n def new(*args):\n \"\"\"Method overload.\"\"\"\n if len(args) == 2:\n new_args = (args[0], copy(args[1]))\n else:\n new_args = (args[0], copy(args[1]), args[2:])\n if hasattr(args[0], 'baseFilename'):\n return fn(*args)\n levelno = new_args[1].levelno\n if levelno >= 50:\n color = '\\x1b[31;5;7m\\n ' # blinking red with black\n elif levelno >= 40:\n color = '\\x1b[31m' # red\n elif levelno >= 30:\n color = '\\x1b[33m' # yellow\n elif levelno >= 20:\n color = '\\x1b[32m' # green\n elif levelno >= 10:\n color = '\\x1b[35m' # pink\n else:\n color = '\\x1b[0m' # normal\n try:\n new_args[1].msg = color + str(new_args[1].msg) + ' \\x1b[0m'\n except Exception as reason:\n print(reason) # Do not use log here.\n return fn(*new_args)\n return new", "def fill():\n print('#', end='')", "def text_output(self):\n print(self.board)\n print()", "def print_message(self, message, color):\n\n xpos = 20\n ypos = self.height\n\n size = common.FONT_SIZE\n\n arcade.draw_text(\n text=message,\n start_x=xpos,\n start_y=ypos,\n anchor_x=\"left\",\n anchor_y=\"top\",\n width=size*len(message),\n color=color,\n font_size=size,\n bold=True)", "def _write_print_mode(self):\n self.write(self.ASCII_ESC, '!', self._print_mode)", "def clear():\n print(black, end='')", "def echo_style(message, no_color, fg='yellow'):\n if no_color:\n click.echo(message)\n else:\n click.secho(message, fg=fg)", "def color_str(text, color):\n if not is_cli() or no_color():\n # Disable color output if not in CLI mode or if color is disabled\n return text\n return f'\\033[{_COLORS[color]}m{text}\\033[30m'", "def cool_print(self, text=str, newline=True, margin=21, rate=.02):\n print(\" \" * margin, end='')\n for letter in text:\n sleep(.02)\n stdout.write(letter)\n stdout.flush()\n if newline:\n print()", "def color_style():\n if (sys.platform == 'win32' or sys.platform == 'Pocket PC'\n or sys.platform.startswith('java') or not sys.stdout.isatty()):\n return no_style()\n class dummy: pass\n style = dummy()\n style.ERROR = termcolors.make_style(fg='red', opts=('bold',))\n style.ERROR_OUTPUT = termcolors.make_style(fg='red', opts=('bold',))\n style.NOTICE = termcolors.make_style(fg='red')\n style.SQL_FIELD = termcolors.make_style(fg='green', opts=('bold',))\n style.SQL_COLTYPE = termcolors.make_style(fg='green')\n style.SQL_KEYWORD = termcolors.make_style(fg='yellow')\n style.SQL_TABLE = termcolors.make_style(opts=('bold',))\n return style", "def my_print(self):\n if self.__size == 0:\n print(\"\")\n return\n [print(\"\") for x in range(0, self.__position[1])]\n for i in range(0, self.__size):\n [print(\" \", end=\"\") for i in range(0, self.__position[0])]\n [print(\"#\", end=\"\") for j in range(0, self.__size)]\n print(\"\")", "def my_print(self):\n if self.__size == 0:\n print()\n else:\n print(\"\\n\" * self.__position[1], end='')\n for x in range(self.__size):\n print(\" \" * 
self.__position[0], end='')\n print(\"#\" * self.__size)", "def write(self, *params):\n if system() == 'Windows':\n windll.kernel32.SetConsoleTextAttribute(windll.kernel32.GetStdHandle(-11),\n self.COLORS[CONSOLE_MESSAGES[params[0]][0]])\n getattr(self._logger, CONSOLE_MESSAGES[params[0]][0].lower())(CONSOLE_MESSAGES[params[0]][1].\n format(*params[1:]))\n if system() == 'Windows':\n windll.kernel32.SetConsoleTextAttribute(windll.kernel32.GetStdHandle(-11), self.COLORS['DEFAULT'])", "def main():\n\n # print 'red string' in red\n print(crayons.red('red string'))\n\n # Red White and Blue text\n #print('{} white {}'.format(crayons.red('red'), crayons.blue('blue'))) # format string (old ver of str templating)\n print(f\"{crayons.red('red')} white {crayons.blue('blue')}\") # f-string (newest version of str templating)\n\n crayons.disable() # disables the crayons package\n\n # this line should NOT have color as crayons is disabled\n print(f\"{crayons.red('red')} white {crayons.blue('blue')}\") # f-string (newest version of string templating)\n\n crayons.DISABLE_COLOR = False # enable the crayons package\n\n # This line will print in color because color is enabled\n print(f\"{crayons.red('red')} white {crayons.blue('blue')}\") # f-string (newest version of string templating)\n\n # print 'red string' in red\n print(crayons.red('red string', bold=True))\n\n # print 'yellow string' in yellow\n print(crayons.yellow('yellow string', bold=True))\n\n # print 'magenta string' in magenta\n print(crayons.magenta('magenta string', bold=True))\n\n # print 'white string' in white\n print(crayons.white('white string', bold=True))", "def print_board(self):\n print_sp = functools.partial(print, end=' ')\n print_sp(' ')\n for i in range(BOARD_SIZE):\n print_sp(i)\n print()\n for i in range(BOARD_SIZE):\n print_sp(i)\n for j in range(BOARD_SIZE):\n e = self.board[j][i]\n print_sp('●') if e == BLACK else print_sp('○') if e == WHITE else print_sp('·')\n print()", "def _print_custom(self):\n pass" ]
[ "0.77871144", "0.7595031", "0.7444135", "0.7422378", "0.73676646", "0.7366209", "0.72847176", "0.7264958", "0.7261931", "0.7220704", "0.7219369", "0.7175978", "0.7156872", "0.71309286", "0.70842475", "0.7082594", "0.7079263", "0.70496935", "0.6940176", "0.69390655", "0.69368523", "0.6870607", "0.6869663", "0.6869663", "0.6869663", "0.6869663", "0.6857574", "0.6857574", "0.6857574", "0.6857574", "0.68469745", "0.6846843", "0.68229556", "0.68086135", "0.6777651", "0.67690235", "0.6751806", "0.6750194", "0.673745", "0.6726258", "0.67080045", "0.66959405", "0.6678695", "0.66782624", "0.66719276", "0.6669616", "0.66216147", "0.6615757", "0.6594407", "0.6580991", "0.6579447", "0.65529114", "0.6533433", "0.6505291", "0.6472588", "0.6464327", "0.64558107", "0.6453655", "0.644502", "0.6408865", "0.64004016", "0.63895196", "0.63803923", "0.63798803", "0.6371241", "0.63670146", "0.63540494", "0.6331455", "0.6320635", "0.63188636", "0.6317867", "0.6312553", "0.6311475", "0.63069695", "0.6306425", "0.6296793", "0.6294206", "0.6292912", "0.6287902", "0.6256636", "0.6233349", "0.62124693", "0.61915267", "0.61840826", "0.618307", "0.61704123", "0.61623794", "0.6154395", "0.61401147", "0.6133984", "0.6126604", "0.61263055", "0.6122375", "0.61221457", "0.6121441", "0.61207694", "0.6104739", "0.61046785", "0.6099007", "0.6096477" ]
0.75665677
2
setup function called before each test
def before_each_test(self, request):
    self.test_counter = Counter()
    self.check_ref = request.config.getvalue("check_ref")
    self.create_ref = request.config.getvalue("create_ref")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUp(self):\n test_env_setup()", "def setUp(self):\r\n # nothing to do, all tests use different things\r\n pass", "def setUp(self):\n MainTests.setUp(self)", "def _fixture_setup(self):\n pass", "def setUp(self):\n pass #because we dont have anything to setup.", "def setUp(self):\n\n pass", "def setUp(self):\n\n pass", "def setUp(self) :\n pass", "def setUp(self):\n\n return", "def setUp(self):\n \n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setup( self ):", "def setUp(self):\n\n BaseTest.setUp(self)", "def setUp(self):\n print(\"New test by Nikolay Melnik\")", "def setUp(self):\n self", "def setUp(self):\n self", "def setUp(self):\r\n pass", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n logging.debug('setting up')", "def setUp(self) -> None:\n pass", "def setUp(self) -> None:\n pass", "def setUp(self):\n setUp()", "def setup(self) -> None:", "def setUp(self):\r\n pass # nothing used by all\r", "def setUp(self):\n\n self._set_up()", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n print('Calling \\'setUp\\'')", "def setUp(self):\n self.setup_beets()", "def setUp(self):\r\n pass # nothing required by all\r", "def _setup(self):", "def _setup(self):", "def setup(self):\n pass", "def setUp(self):\n raise NotImplementedError", "def setup(self):\n pass", "def _setup(self) -> None:\n\t\treturn", "def setUp(self):\n super(BasicTestCase, self).setUp()", "def setup(self):\n ...", "def _set_up():\n repl._setUp = self.setUp", "def setUp(self):\r\n print('---------------------------------------------\\n')\r\n print('STARTING TEST...')", "def setUp(self):\n print(\"\\nIn setUp()...\")", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\r\n pass", "def before_run_tests(cls):\n pass", "def setup(self):\n\t\tpass", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):" ]
[ "0.84756434", "0.83393013", "0.83271843", "0.83170396", "0.82956946", "0.8280618", "0.8280618", "0.8279627", "0.82470554", "0.82384115", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82242966", "0.82217646", "0.82123184", "0.820357", "0.81897926", "0.81897926", "0.81782943", "0.81697583", "0.81697583", "0.8164004", "0.8164004", "0.8127081", "0.81249475", "0.8114959", "0.81002986", "0.80831444", "0.80831444", "0.80831444", "0.80831444", "0.80831444", "0.80831444", "0.80831444", "0.80831444", "0.80831444", "0.8065931", "0.8052411", "0.80349475", "0.80077565", "0.80077565", "0.79855967", "0.7979793", "0.79752445", "0.79707736", "0.79373956", "0.79367733", "0.7934448", "0.78993535", "0.7897643", "0.7887703", "0.7887703", "0.7887703", "0.7887703", "0.7887703", "0.788295", "0.788295", "0.788295", "0.788295", "0.788295", "0.788295", "0.788295", "0.788295", "0.788295", "0.788295", "0.788295", "0.788295", "0.7876098", "0.7855538", "0.7824745", "0.7822038", "0.7822038", "0.7822038", "0.7822038", "0.7822038", "0.7822038", "0.7822038" ]
0.0
-1
Does nothing. Inherited from old Artemis where the kraken is stopped then started. In Artemis NG, the kraken is restarted in 'kill_the_krakens'
def pop_krakens(cls):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def restart(self):", "def restart(self) -> None:", "def stopEngines():\n pass", "def _restart(self):\n pass", "def restart(self):\r\n pass", "def _kill_kernel(self):", "def restart(self):\n pass", "def restart(self):\n self.km.restart_kernel(now=True)", "def reboot(self):\n raise NotImplementedError", "def shutdown(self):", "def restart():\n log.info('restart')\n samuraix.restarting = True\n samuraix.app.stop()", "def restart():\n stop()\n start()", "def shutdown(self):\n ...", "def stop_run(arn=None):\n pass", "def shutdown(self) -> None:", "def shutdown(self) -> None:", "def restart(self):\n self.stop()\n self.start(init=False)", "def reboot(self, node):", "def restart_arangods(self):\n for instance in self.all_instances:\n instance.kill_instance()\n instance.rename_logfile()\n self.detect_instances()", "def restart():\n run('kill -HUP $(cat /tmp/pyar_web.pid)')", "def _stop(self):", "def shutdown_kernel(self, now=False, restart=False):", "def finished_restarting():\n flags.restarting = False\n group_spawn(qtile.current_group)\n qtile.cmd_spawn(\"nitrogen --restore\")", "def terminate(self):\n self._running = False", "def restart_kernel(self, now=False, **kw):", "def kill(self):\n self.error_code = 'KILLED'\n self.running = False", "def restart(self):\n self.stop()\n self.start()", "def restart(self):\n self.stop()\n self.start()", "def request_shutdown(self, restart=False):", "def _shutdown(self):", "def shutdown(self):\n pass", "def shutdown(self):\n pass", "def shutdown(self):\n pass", "def restart(self):\n\n self.stop()\n self.start()", "def pre_stop(self):", "async def kill(self, restart: bool = False) -> None:\n pass", "def post_stop(self):", "def shutdown(self):\n rospy.loginfo(\"Stopping Project\")\n rospy.sleep(1)", "def restart(self, timestamp=0.0, **keywords):\n self.services.debug('restart() method called')\n pass", "def stop(self):\n self.microblaze.reset()", "def shutdown(self):\n\n pass", "def restart_salt():\n stop_salt()\n start_salt()", "def shutdown(self):\n\n raise NotImplementedError", "def shutdown() -> None: # TODO Better place for this code\n # TODO Safe landing\n pass", "def on_StopNode_clicked(self):\n # TODO: not implemented yet\n #raise NotImplementedError\n print(\"We will kill all gman process!\")\n reply = QMessageBox.question(self, '确认', '确认kill所有gman任务吗', QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n\n if reply == QMessageBox.Yes:\n autokillGman()\n self.OnlyDisplay(\"kill -9 |grep gman\")\n else:\n print(\"Keep GMAN run.......!\")", "def lysis(self) :\n self.kill()\n return True", "def stop(self):", "def stop(self):", "def shutdown_kernel(self, now=False, restart=False):\n pass", "def stop(self):\n print(\"Stopping accessory.\")", "def stop():", "def stop():", "def stop():", "def stop():", "def Halt(self):\n self.run_halt = True", "def stop(self):\n\t\tpass", "def restart(self):\n self.client.post(self.path+'/action', { 'restart': {} })\n return True", "def GET_kill(self):\n sys.exit(0)", "def stop_and_restart():\n logging.info(\"Restarting eduzen_bot...\\n\")\n bot.updater.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def request_shutdown(self, kernel_id, restart=False):", "def _gracefully_stop(self):\n pass", "def terminate(self):", "def singularity_rm(self):\n Client.instances(self.pid, quiet=self.quiet).stop()", "def restart_kernel(self, kernel_id, now=False):", "def stop(self):\n self.killed = True", "def stop(self):\n super().stop()", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", 
"def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def Shutdown(self):\n pass", "def Shutdown(self):\n pass", "def remote_kill():", "def stop(self):\n self.scion_sh('stop')", "def initiate_shutdown(self) -> None:", "def stop() -> None:", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def doReboot(self):\n logging.info(\"%s doReboot\", ModuleName)\n if CB_CELLULAR_BRIDGE:\n try:\n Popen([\"/usr/bin/modem3g/sakis3g\", \"--sudo\", \"disconnect\"])\n except Exception as ex:\n logging.warning(\"%s deReboot. sakis3g disconnect failed\", ModuleName)\n logging.warning(\"%s Exception: %s %s\", ModuleName, type(ex), str(ex.args))\n try:\n self.cbSendManagerMsg({\"msg\": \"stopall\"})\n except Exception as ex:\n logging.warning(\"%s Cannot tell manager to stop, just rebooting\", ModuleName)\n logging.warning(\"%s Exception: %s %s\", ModuleName, type(ex), str(ex.args))\n # Tidy up\n #self.mgrPort.stopListening()\n reactor.callLater(REBOOT_WAIT, self.reboot)", "def restart(self):\n global shouldRestart\n shouldRestart = True\n logging.info(\"Restarting bot\")\n self.die()", "def restart(self):\n self.__init__()\n return", "def shutdown_kernel(self, kernel_id, now=False, restart=False):", "def shutdown(self) -> None:\n pass", "def stop(self):\r\n pass", "def stopTestRun(self):", "def gracefully_terminate(self):\n self.running = False", "def shutdown(self):\n raise NotImplementedError", "def stop (self):\n pass", "def stop (self):\n pass" ]
[ "0.6768174", "0.67484814", "0.6615716", "0.6595835", "0.6585611", "0.6582545", "0.658062", "0.64321345", "0.63734066", "0.6317657", "0.62966824", "0.6273129", "0.6253487", "0.6223028", "0.6221627", "0.6221627", "0.62077504", "0.62009984", "0.6190042", "0.6188181", "0.6167238", "0.6166977", "0.6130836", "0.6118058", "0.60984856", "0.60863936", "0.60830235", "0.60830235", "0.6059249", "0.60590214", "0.6057373", "0.6057373", "0.6057373", "0.60426855", "0.6041298", "0.603712", "0.6012718", "0.60039735", "0.60028046", "0.59764564", "0.59744626", "0.5968013", "0.5960265", "0.59599215", "0.5952135", "0.5945581", "0.5944476", "0.5944476", "0.5943999", "0.5929582", "0.5918108", "0.5918108", "0.5918108", "0.5918108", "0.5913502", "0.5906969", "0.59057933", "0.59016067", "0.5897451", "0.5896129", "0.5892534", "0.58821064", "0.58815396", "0.5874171", "0.58731675", "0.5861758", "0.5859386", "0.5859386", "0.5859386", "0.5859386", "0.5859386", "0.5859386", "0.5859386", "0.58465487", "0.58465487", "0.58414173", "0.5838206", "0.58309734", "0.58259207", "0.5822224", "0.5822224", "0.5822224", "0.5822224", "0.5822224", "0.5822224", "0.5822224", "0.5822224", "0.5822224", "0.5822224", "0.5822224", "0.5820004", "0.58182454", "0.58133954", "0.5809054", "0.58008915", "0.5800289", "0.5783412", "0.57833636", "0.57833457", "0.57799274", "0.57799274" ]
0.0
-1
used to check misc API
def api(self, url, response_checker=default_checker.default_checker):
    return self._api_call(url, response_checker)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check():", "def check_eapi(self, eapi):\n\t\treturn True", "def test_api() -> bool:\r\n weather = False\r\n news = False\r\n covid = False\r\n if check_weather_version():\r\n logging.info(\"Weather API version is up to date (check_weather_version())\")\r\n weather = True\r\n else:\r\n logging.info(\"Weather API version is not up to date (check_weather_version()) - ACTION REQUIRED\")\r\n if check_news_version():\r\n logging.info(\"News API version is up to date (check_news_version())\")\r\n news = True\r\n else:\r\n logging.info(\"News API version is not up to date (check_news_version()) - ACTION REQUIRED\")\r\n if check_covid_version():\r\n logging.info(\"Covid-19 API version is up to date (check_covid_version())\")\r\n covid = True\r\n else:\r\n logging.info(\"Covid-19 API version is not up to date (check_covid_version()) - ACTION REQUIRED\")\r\n return bool(weather and news and covid)", "def test_check_system_python_api(self):\n\n errors, successes = check_system.check_system()\n self.assertTrue(len(errors) + len(successes) >= 4)", "def check_status(self):", "def check(self) -> None:", "def check():\n hokusai.check()", "def check_auth():", "def apicheck():\n\n async def predicate(ctx: commands.Context):\n travitia_keys = await ctx.bot.get_shared_api_tokens(\"travitia\")\n key = travitia_keys.get(\"api_key\") is None\n if ctx.invoked_with == \"help\" and key:\n return False\n if key:\n await ctx.send(\"The API key is not registered, the command is unavailable.\")\n return False\n return True\n\n return commands.check(predicate)", "def bad_api(api):\n return (not api.isalpha()) or (not api.isupper()) or (API_LIST.get(api) is None)", "def do_api_check(self):\n for charid in self.conn.get_api_characters():\n self.update_character(charid)\n for corpid in self.conn.get_api_corporations():\n self.update_corporation(corpid)\n for allyid in self.conn.get_api_alliances():\n self.update_alliance(allyid)", "def validate(self, apiobj, method, api, param, safe):", "async def _check_api(self) -> None:\n await self._api_request(\"devices\")", "def test_api_base(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url()))\n j = r.json()\n self.assertIn('gages', j)\n self.assertIn('sections', j)\n self.assertIn('regions', j)\n self.assertIn('rivers', j)\n self.assertIn('sensors', j)\n self.assertIn('samples', j)", "def test_get_info(self):\n pass", "def sanity_check(self):\n pass", "def check(self):\n pass", "def check(self):\n pass", "def check(self):\n pass", "def check(self):\n pass", "def _check_config(self):", "def check_vulnerability(self):\n\t\tpass", "def test_getusage(self):\n ret = {\"message\": \"No Random.org api key or api version found.\", \"res\": False}\n self.assertDictEqual(random_org.getUsage(), ret)\n\n self.assertDictEqual(\n random_org.getUsage(api_key=\"peW\", api_version=\"1\"),\n {\n \"bitsLeft\": None,\n \"requestsLeft\": None,\n \"res\": True,\n \"totalBits\": None,\n \"totalRequests\": None,\n },\n )", "def check_errors(self) -> None:", "def check(self):\n invalid = []\n\n if not self.route:\n invalid.append(('route', 'missing'))\n elif not self.route[1] in ['GET', 'POST', 'PUT']:\n invalid.append(('route', 'invalid method: %s' % self.route[1]))\n\n has_2xx = False\n for rcode in self.return_codes:\n code = rcode[0]\n if code >= 200 and code < 300:\n has_2xx = True\n break\n if not has_2xx:\n invalid.append(('return_codes', 'Missing succes return code doc'))\n\n if self.client_auth is None:\n invalid.append(\n ('client_auth', 
'Please provide client auth requirement'))\n\n if self.user_auth is None:\n invalid.append(\n ('user_auth', 'Please provide user auth requirement'))\n\n if invalid:\n msgs = []\n for error in invalid:\n msgs.append(\"%s: %s\" % error)\n raise ValueError(\n \"APIFunc for %s is invalid: %s\"\n % (self.viewfunc.__name__,\n ', '.join(msgs)))", "def Check(self, parameters):", "def check_validity(self):", "def verify():", "def getStatus():", "def status_check():\n return {\"status\": \"OK\"}", "def health_check():\n return dict(api_status='OK')", "def testApi(self):", "def check_api(submitted_key, users_key):\r\n if users_key != submitted_key:\r\n return False\r\n else:\r\n return True", "def verify(self):", "def test_request(self):\n self.assertIn('list', self.api.request('sys.settings.get').data,\n msg=\"request() doesn't work properly. 'list' is not found in the response\")", "def test_get_api(self):\n # Get metadata list\n _logger.info('Get sequencerun API')\n response = self.client.get('/sequencerun/')\n self.assertEqual(response.status_code, 200, 'Ok status response is expected')\n\n _logger.info('Check if API return result')\n result_response = response.data['results']\n self.assertGreater(len(result_response), 0, 'A result is expected')\n\n _logger.info('Check if unique data has a single entry')\n response = self.client.get('/sequencerun/?msg_attr_action=statuschanged')\n results_response = response.data['results']\n self.assertEqual(len(results_response), 1, 'Single result is expected for unique data')\n\n _logger.info('Check Invalid keyword')\n response = self.client.get('/sequencerun/?foo=bar')\n results_response = response.data['results']\n self.assertEqual(len(results_response), 0, 'No results are expected for unrecognized query parameter')", "def test_get_status(self):\n pass", "def test_get_status(self):\n pass", "def test_api_sample(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = requests.get(r['samples']).json()\n # load a sample\n r = requests.get(r['samples'][0]['url']).json()\n self.assertIn('datetime', r)\n self.assertIn('value', r)\n self.assertIn('id', r)\n self.assertIn('url', r)\n self.assertIn('sensor', r)", "def test_tips(self):\n res = self.client.get(\"/tips\")\n data = res.data.decode(\"utf-8\")\n assert res.status == \"200 OK\"\n assert \"Bits and pieces\" in data", "def check(self, runtime):", "def _verify(self):\n pass", "def check(self):\n out = self.LeggiDocumento('000')\n if 'success' in out and 'result' in out:\n out.pop('result')\n else:\n out.pop('request')\n return out", "def test_1():\n\tassert api_call().status_code == 200", "def check_api_parameters(rstfiles, apiinfo):\n pat = re.compile(\n r'^\\.\\.\\s+py:(method|function|class)::\\s+(\\S+)\\s*\\(\\s*(.*)\\s*\\)\\s*$'\n )\n check_passed = []\n check_failed = []\n api_notfound = []\n for rstfile in rstfiles:\n rstfilename = osp.join('../docs', rstfile)\n print(f'checking : {rstfile}')\n with open(rstfilename, 'r') as rst_fobj:\n func_found = False\n for line in rst_fobj:\n mo = pat.match(line)\n if mo:\n func_found = True\n functype = mo.group(1)\n if functype not in ('function', 'method'):\n check_passed.append(rstfile)\n continue\n funcname = mo.group(2)\n paramstr = mo.group(3)\n flag = False\n func_found_in_json = False\n for apiobj in apiinfo.values():\n if (\n 'all_names' in apiobj\n and funcname in apiobj['all_names']\n ):\n func_found_in_json = True\n if 'args' in apiobj:\n if 
paramstr == apiobj['args']:\n print(\n f'check func:{funcname} in {rstfilename} with {paramstr}'\n )\n flag = _check_params_in_description(\n rstfilename, paramstr\n )\n else:\n print(\n f'check func:{funcname} in {rstfilename} with {paramstr}, but different with json\\'s {apiobj[\"args\"]}'\n )\n flag = _check_params_in_description(\n rstfilename, paramstr\n )\n else: # paddle.abs class_method does not have `args` in its json item.\n print(\n f'check func:{funcname} in {rstfilename} with its FullArgSpec'\n )\n flag = _check_params_in_description_with_fullargspec(\n rstfilename, funcname\n )\n break\n if not func_found_in_json: # may be inner functions\n print(\n f'check func:{funcname} in {rstfilename} with its FullArgSpec'\n )\n flag = _check_params_in_description_with_fullargspec(\n rstfilename, funcname\n )\n if flag:\n check_passed.append(rstfile)\n print(f'check success: {rstfile}')\n else:\n check_failed.append(rstfile)\n print(f'check failed: {rstfile}')\n break\n if not func_found:\n api_notfound.append(rstfile)\n print(f'check failed (object not found): {rstfile}')\n print(f'checking done: {rstfile}')\n return check_passed, check_failed, api_notfound", "def check(self):\n raise NotImplementedError", "def check_ngin_access(subdata):\r\n r = verifyPort(999)\r\n print (r.status_code)\r\n if r.status_code == 204:\r\n return True\r\n else:\r\n return False\r\n \"\"\"\r\n if (subdata[0][0]):\r\n return True \r\n else:\r\n return False\r\n \"\"\"", "def check_global_request(self, kind, msg):\n return False", "def test_21(self):\n assert 'False' == Api.requestBlock('test-21')", "def test_api_urls():\n # Test the status message - 404 not good , 200 good\n assert API_RH.create_req().status_code == 200, \"The tests for URLs were successful\"", "def sanity_check(self):\n return True", "def check_stability(self):", "def test_server_info(self):\n pass", "def check_all(c):", "def test_root_api(self):\n\n # GIVEN API\n\n # WHEN fetching available applications and models\n response = self.api.root_api()\n\n # THEN it should succeed\n self.assertTrue(response.success)\n\n # AND it should have valid data\n for item in response.data:\n self.assertEqual(len(item.keys()), 3)\n self.assertEqual(set(item.keys()), set(['model', 'actions', 'app_label']))\n\n # AND it contains also UI application models\n self.assertTrue(any('test' in d['app_label'] for d in response.data))\n\n # AND public applications are also available\n data = [item for item in response.data if item['app_label'] == 'admin']\n self.assertEqual(len(data), 1)\n self.assertEqual(data[0]['model'], None)\n self.assertEqual(len(data[0]['actions'].keys()), 2)", "def check(self, api, user, cost):\n details = {'api': api, 'user': user, 'cost': cost, 'fqdn': self.fqdn}\n\n self.check_user(user, cost, details)\n self.check_api(api, cost, details)\n self.check_system(cost, details)", "def info() -> None:", "def _check_validity(self):\n pass", "def test_25(self):\n assert 'False' == Api.requestBlock('test-25')", "def test_35(self):\n assert 'False' == Api.requestBlock('test-35')", "def test_root_public_api(self):\n\n # GIVEN public API\n # WHEN fetching available applications and models\n response = self.api.root_api(public=True)\n\n # THEN it should succeed\n self.assertTrue(response.success)\n\n # AND it should have valid data\n self.assertEqual(len(response.data), 2)\n self.assertEqual(response.data[0]['app_label'], 'auth')\n self.assertEqual(response.data[1]['app_label'], 'admin')", "def _CommonChecks(input_api, output_api):\n results = []\n 
results.extend(_CheckNoInterfacesInBase(input_api, output_api))\n results.extend(_CheckNoTraceEventInclude(input_api, output_api))\n results.extend(_WarnPbzeroIncludes(input_api, output_api))\n results.extend(CheckChangeLintsClean(input_api, output_api))\n return results", "def check_api_keys(self, request):\n app_id, api_obj = request.META.get(\"HTTP_APP_ID\"), None\n api_secret_key = request.META.get(\"HTTP_API_SECRET_KEY\")\n if app_id and api_secret_key:\n # validate app_id and api_secret_key\n app_id_bool = self._validate_app_id(app_id)\n if not app_id_bool:\n return False, self.app_id_message\n api_secret_key_bool = self._validate_api_secret_key(api_secret_key)\n if not api_secret_key:\n return False, self.api_secret_key_message\n try:\n api_obj = ApiApp.objects.get(app_id=app_id, api_secret_key=api_secret_key, active=True)\n if api_obj:\n self.app(request, api_obj)\n return True, ''\n except ApiApp.DoesNotExist:\n self.app(request, api_obj)\n return False, self.message\n else:\n self.app(request, api_obj)\n return False, self.message", "def api():\n\treturn \"The API call\"", "def __CheckFilesAndData(self):\n APIChoose = tasks.CheckPointFactory_Connection(self.ServerInfo['MgmtServerData'].MgmtR80ApiVersion)\n #conn = tasks.CheckPointAPI(self.ServerInfo['MgmtServerData'].ServerIP,\n # self.ServerInfo['MgmtServerData'].MgmtPort)\n conn = APIChoose(self.ServerInfo['MgmtServerData'].ServerIP, self.ServerInfo['MgmtServerData'].MgmtPort)\n fileTCPPorts = Path(self.ServerInfo['MgmtObjects'].MGMTServerFilePathTCPPorts)\n fileUDPPorts= Path(self.ServerInfo['MgmtObjects'].MGMTServerFilePathUDPPorts)\n fileObjects = Path(self.ServerInfo['MgmtObjects'].MGMTServerFilePathNetObjects)\n fileNetworks = Path(self.ServerInfo['MgmtObjects'].MGMTServerFilePathNetworksObjects)\n #Si no existen los archivos\n print(fileUDPPorts)\n conn.ChkpLogin(self.ServerInfo['MgmtServerUser'].R80User, self.ServerInfo['MgmtServerUser'].R80Password)\n if not(fileTCPPorts.is_file() and fileObjects.is_file() \\\n and fileUDPPorts.is_file() and fileNetworks.is_file()):\n #ENTRA CON TRUE\n fileTCPPorts.touch()\n fileObjects.touch()\n fileUDPPorts.touch()\n fileNetworks.touch()\n #tcpPorts = json.dumps(conn.ChkpShowServicesTCP())\n tcpPorts = json.dumps(conn.ChkpShowFullServicesTCP())\n udpPorts = json.dumps(conn.ChkpShowFullServicesUDP())\n fileTCPPorts.write_text(tcpPorts)\n fileUDPPorts.write_text(udpPorts)\n hosts = json.dumps(conn.ChkpShowFullHosts())\n fileObjects.write_text(hosts)\n networks = json.dumps(conn.ChkpShowFullNetworks())\n fileNetworks.write_text(networks)\n else:\n #Existen los archivos tenemos que verificar la ultima version de la API si no actualizarlos\n DBChkpVersion = self.ServerInfo['MgmtServerData'].LastPublishSession\n RemoteVersion = conn.ChkpShowLastPublishedSession()\n RemoteVersion = RemoteVersion['publish-time']['posix']\n #Si las versiones de Base de datos son distintas vamos por todo nuevamente\n if DBChkpVersion != RemoteVersion:\n print('Versiones diferentes actualizando la versiones')\n #tcpPorts = json.dumps(conn.ChkpShowServicesTCP())\n tcpPorts = json.dumps(conn.ChkpShowFullServicesTCP())\n udpPorts = json.dumps(conn.ChkpShowFullServicesUDP())\n fileTCPPorts.write_text(tcpPorts)\n fileUDPPorts.write_text(udpPorts)\n hosts = json.dumps(conn.ChkpShowFullHosts())\n fileObjects.write_text(hosts)\n networks = json.dumps(conn.ChkpShowFullNetworks())\n fileNetworks.write_text(networks)\n self.ServerInfo['MgmtServerData'].LastPublishSession = RemoteVersion\n 
self.ServerInfo['MgmtServerData'].save()\n else:\n print('Mismas versiones nada que modificar')\n conn.LogOutCheckPoint()", "def match_api_keys(key, ip):", "def test_kyc_get_legal(self):\n pass", "def healthcare():", "def probe_api():\n\n info = loads(get(url).text)\n return info", "def test_26(self):\n assert 'False' == Api.requestBlock('test-26')", "def test_36(self):\n assert 'False' == Api.requestBlock('test-36')", "def check_http_request_validity(http_raw_data) -> HttpRequestState:\n\n global version\n r1 = http_raw_data.split('\\n')[0]\n r2 = http_raw_data.split('\\n')[1]\n\n if (re.search(\"GET\", r1) != None) and (re.search(\"/\", r1) != None) and (re.search(\"HTTP/1.0\", r1) != None) and (re.search(\":\", r2)):\n return HttpRequestState.GOOD\n\n if (re.search(\"GET\", r1) != None) and (re.search(\"http://\", r1) != None) and (re.search(\"HTTP/1.0\", r1) != None):\n return HttpRequestState.GOOD\n\n if (re.search(\"GET\", r1)!=None) and (re.search(\"/\", r1)!=None) and (re.search(\"HTTP/1.0\",r1)!=None) :\n if (re.search(\":\", r2) == None) :\n return HttpRequestState.INVALID_INPUT\n\n if(re.search(\"GOAT\", r1)!=None):\n return HttpRequestState.INVALID_INPUT\n\n if (re.search(\"HEAD\"or\"POST\" or \"PUT\" , r1)!=None) and (re.search(\"/\",r1)!=None) and (re.search(\"HTTP/1.0\", r1) != None) and (re.search(\":\", r2)):\n\n return HttpRequestState.NOT_SUPPORTED\n\n if (re.search(\"HEAD\"or\"POST\" or \"PUT\" ,r1)!=None) and (re.search(\"/\",r1)!=None) and (re.search(\"HTTP/1.0\",r1)!=None):\n return HttpRequestState.INVALID_INPUT\n\n if (re.search(\"HEAD\"or\"POST\" or \"PUT\", r1) != None) and (re.search(\"HTTP/1.0\", r1) == None) and (re.search(\":\", r2) != None):\n return HttpRequestState.INVALID_INPUT\n print(\"*\" * 50)\n print(\"[check_http_request_validity] Implement me!\")\n print(\"*\" * 50)\n\n return HttpRequestState.PLACEHOLDER", "def test_10(self):\n assert 'False' == Api.requestBlock('test-10')", "def test_api_ping_failed_missing_api(self):\r\n res = self.testapp.get('/ping?api_key=' + API_KEY,\r\n status=200)\r\n ping = json.loads(res.body)\r\n\r\n self.assertTrue(not ping['success'])\r\n self.assertEqual(ping['message'], \"The API url should be /api/v1\")\r\n self._check_cors_headers(res)", "def test_27(self):\n assert 'False' == Api.requestBlock('test-27')", "def check_api_access(info):\n\n try:\n file = open(PATH + '/../DB/access.json', 'r')\n accessData = json.load(file)\n except:\n return False\n\n try:\n application = info['application_name']\n applicationData = accessData.get(application)\n\n if applicationData is None:\n return False\n\n timestamp = applicationData[\"timestamp\"]\n if info['timestamp'] == timestamp:\n return True \n return False\n except:\n return False", "def test_23(self):\n assert 'False' == Api.requestBlock('test-23')", "def test_37(self):\n assert 'False' == Api.requestBlock('test-37')", "def verify_core_connection():\n if not base_url or not api_credentials:\n retrieve_connection_info()\n return", "def verify_core_connection():\n if not base_url or not api_credentials:\n retrieve_connection_info()\n return", "def test_functional_good_ip(self, url):\n response = requests.get(\"http://localhost:80/ip2w/{url}\".format(url=url))\n if response.status_code != BAD_GATEWAY:\n print(\"\\nGATEWAY is OK\")\n self.assertEqual(response.status_code, OK)\n content = response.json()\n self.assertEqual(len(content), 3)\n self.assertTrue(content.get(\"temp\"))\n self.assertTrue(content.get(\"city\"))\n else:\n print(\"\\nGATEWAY is RESET BY PEER\")", "def 
check_post(self, url, info):\r\n \r\n test = requests.get(url, headers = self.headers).json()['results']\r\n if info == test:\r\n return True\r\n return False", "def validate_snx_api_key():\n api_data = {} # type: Dict[str, str]\n response = http_request(endpoint=API_QUOTA, data=api_data)\n\n if response.get('errorNo') != 0:\n return_error('API Returned, {}:{}'.format(response.get('errorNo'), response.get('errorMsg')))\n\n return 'ok'", "def test_46(self):\n assert 'False' == Api.requestBlock('test-46')", "def test_get_api_resources(self):\n pass", "def test_41(self):\n assert 'False' == Api.requestBlock('test-41')", "def test_meme_get(self):\n pass", "def test_ice_and_fire_external_invalid_search(self):\n response = self.client.get('/api/external-books?name=abc23123', format='json')\n self.assertEqual(200, response.data['status_code'])\n self.assertEqual(0, len(response.data['data']))", "def ping_missing_api(request):\r\n return _api_response(request, {\r\n 'success': False,\r\n 'message': 'The API url should be /api/v1'\r\n })", "def rpc_match():", "def test_api_response_data(self):", "def verify(self):\r\n pass", "def test_34(self):\n assert 'False' == Api.requestBlock('test-34')", "def util():\n pass", "def util():\n pass", "def check_availability(self):\n pass", "def test_52(self):\n assert 'False' == Api.requestBlock('test-52')", "def test_43(self):\n assert 'False' == Api.requestBlock('test-43')", "def test_28(self):\n assert 'False' == Api.requestBlock('test-28')", "def test_kyc_get_validation_legal(self):\n pass", "def test_24(self):\n assert 'False' == Api.requestBlock('test-24')" ]
[ "0.6912855", "0.66411334", "0.64717084", "0.6467784", "0.64383173", "0.62957495", "0.6267212", "0.62266237", "0.6179696", "0.61172795", "0.6099899", "0.6095906", "0.6094198", "0.6077264", "0.6034731", "0.6029176", "0.60286736", "0.60286736", "0.60286736", "0.60286736", "0.59865344", "0.59782976", "0.5953818", "0.5946571", "0.59106237", "0.5902302", "0.5873179", "0.5864856", "0.5846033", "0.57959986", "0.57904315", "0.5788584", "0.57870054", "0.5757031", "0.575115", "0.5737439", "0.57274455", "0.57274455", "0.57260096", "0.5721549", "0.57146776", "0.56997496", "0.5682231", "0.567623", "0.5671942", "0.56505716", "0.5649011", "0.5639382", "0.5631788", "0.56292504", "0.5628809", "0.56286204", "0.5618657", "0.55937594", "0.5563869", "0.55577654", "0.55522925", "0.55501425", "0.5545709", "0.5543758", "0.55428904", "0.5538422", "0.5533909", "0.5533399", "0.55283844", "0.5526437", "0.55255103", "0.5522777", "0.551452", "0.55106026", "0.5508632", "0.55066496", "0.5505326", "0.55020875", "0.54990035", "0.5495104", "0.54919064", "0.5488345", "0.5487972", "0.5487972", "0.5484462", "0.5484418", "0.54841995", "0.548279", "0.5482036", "0.5479591", "0.5479496", "0.5479172", "0.5472531", "0.5471529", "0.54698086", "0.54675424", "0.54655623", "0.5464132", "0.5464132", "0.54618376", "0.54609007", "0.5457644", "0.54569286", "0.5454299", "0.5454039" ]
0.0
-1
call the api and check against previous results; the query is written in a file
def _api_call(self, url, response_checker): self.request_compare(url)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data( filepath_query, filepath_results ):\n with open( filepath_query, 'r' ) as query_file:\n query = json.load( query_file )\n \n query_text = query['query']['multi_match']['query']\n query_scores = query['nlp_scores']\n query_data = {\n 'query_text' : query_text,\n 'bias_score' : query_scores['bias_score'],\n 'vocab_richness' : query_scores['stylo_scores']['vocab_richness'],\n 'hapax_legomena' : query_scores['stylo_scores']['hepax_legomena'],\n 'wordlength' : query_scores['stylo_scores']['readability_measures']['average_wordlength'],\n 'sentlength' : query_scores['stylo_scores']['readability_measures']['average_sentlength'],\n 'spelling_errors' : query_scores['stylo_scores']['spelling_errors'],\n 'topics' : query_scores['topics']\n }\n\n with open( filepath_results ) as results_file:\n results = json.load( results_file )\n \n results_data = []\n for doc in results:\n argID = doc['_source']['argsMeID']\n premise = doc['_source']['premise']\n average_wordlength = doc['nlp_scores']['stylo_scores']['readability_measures']['average_wordlength']\n average_sentlength = doc['nlp_scores']['stylo_scores']['readability_measures']['average_sentlength']\n bias_score = doc['nlp_scores']['bias_score']\n bias_distance = doc['bias_distance']\n stylo_distance = doc['stylo_distance']\n topic_match_count = doc['topic_match_count']\n old_score = doc['old_score']\n new_score = doc['new_score']\n scoring_distance = doc['scoring_distance']\n old_rank = doc['old_rank']\n new_rank = doc['new_rank']\n \n doc_data = {\n 'argID' : argID,\n 'premise' : premise,\n 'wordlength' : average_wordlength,\n 'sentlength' : average_sentlength,\n 'bias_score' : bias_score,\n 'bias_distance' : bias_distance,\n 'stylo_distance' : stylo_distance,\n 'topic_match_count' : topic_match_count,\n 'old_score' : old_score,\n 'new_score' : new_score,\n 'scoring_distance' : scoring_distance,\n 'old_rank' : old_rank,\n 'new_rank' : new_rank\n }\n results_data.append( doc_data )\n\n data_tuple = ( query_data, results_data )\n return data_tuple", "def main(index, output_file, **kwargs):\n\n output_jsonl = None\n output_text = None\n if 'json' in kwargs['output_format']:\n fname = output_file if len(kwargs['output_format']) == 1 else kwargs['output_format'] + '.jsonl'\n output_jsonl = open(fname, 'w')\n if 'text' in kwargs['output_format']:\n fname = output_file if len(kwargs['output_format']) == 1 else kwargs['output_format'] + '.txt'\n output_text = open(fname, 'w')\n\n if kwargs.get('query') is not None:\n query = json.load(kwargs.get('query'))\n else:\n query = {\n \"sort\": [\"warc_id\"],\n \"size\": 200,\n \"query\": {\n \"bool\": {\n \"filter\": {\n \"bool\": {\n \"must_not\": [\n {\n \"query_string\": {\n \"analyze_wildcard\": True,\n \"default_field\": \"*\",\n \"query\": \"\"\"group:(*.patches OR *.commits* OR\n *.dist-commits* OR *.version-control* OR *.git* OR *.cvs* OR *.svn*\n OR *.trunk* OR *.scm* OR *.pkg*) OR (group:(*.bugs* OR *.issues*\n OR *.bugzilla* OR *.codereview*) OR \n headers.subject.keyword:(*jira* OR *bugzilla*) OR\n headers.from_email.keyword:(*bugs* OR *bugzilla* OR *jira* OR *jboss*))\"\"\"\n }\n }\n ],\n \"must\": {\"term\": {\"lang\": \"en\"}},\n \"minimum_should_match\": 1,\n \"should\": [\n {\"wildcard\": {\"group\": \"gmane.culture.*\"}},\n {\"wildcard\": {\"group\": \"gmane.politics.*\"}},\n {\"wildcard\": {\"group\": \"gmane.science.*\"}},\n {\"wildcard\": {\"group\": \"gmane.education.*\"}},\n {\"wildcard\": {\"group\": \"gmane.music.*\"}},\n {\"wildcard\": {\"group\": \"gmane.games.*\"}},\n 
{\"wildcard\": {\"group\": \"gmane.recreation.*\"}}\n ]\n }\n }\n }\n }\n }\n\n logger.info('Retrieving initial batch')\n es = util.get_es_client()\n results = util.es_retry(es.search, index=index, scroll='10m', size=kwargs['scroll_size'], body=query)\n\n skip = kwargs['skip']\n if skip > 0:\n logger.info('Skipping ahead {} messages'.format(skip))\n\n sampled_groups = {}\n num_samples = 0\n num_skipped = 0\n\n try:\n with tqdm(desc='Calculating progress', unit=' messages') as progress_bar:\n while num_samples < kwargs['total_mails'] and len(results['hits']['hits']) > 0:\n for hit in results['hits']['hits']:\n if skip > 0 and num_skipped < skip:\n progress_bar.set_description('Skipping messages')\n progress_bar.total = skip\n num_skipped += 1\n progress_bar.update()\n continue\n elif (skip == 0 or num_skipped >= skip) and num_samples == 0:\n progress_bar.set_description('Sampling messages')\n progress_bar.total = kwargs['total_mails']\n progress_bar.n = 0\n progress_bar.last_print_n = 0\n progress_bar.update(0)\n\n src = hit['_source']\n text_plain = src['text_plain']\n\n prev_samples = sampled_groups.get(src['group'], 0)\n if kwargs['group_limit'] and prev_samples > kwargs['group_limit']:\n continue\n sampled_groups[src['group']] = prev_samples + 1\n\n num_samples += 1\n progress_bar.update()\n\n if output_jsonl:\n json.dump({'text': text_plain,\n 'meta': {k: src[k] for k in src.keys() if k not in ['text_plain', 'text_html']},\n 'labels': []}, output_jsonl)\n output_jsonl.write('\\n')\n\n if output_text:\n output_text.write(util.normalize_message_text(text_plain))\n output_text.write('\\n')\n\n if num_samples >= kwargs['total_mails']:\n break\n\n results = util.es_retry(es.scroll, scroll_id=results['_scroll_id'], scroll='10m')\n finally:\n es.clear_scroll(scroll_id=results['_scroll_id'])\n\n if output_jsonl:\n output_jsonl.close()\n if output_text:\n output_text.close()", "def run_query(self):\n query_dictionary_file_lines = self.get_dictionary_file_lines_for_keywords()\n result_postings_list = merge_lists([result.postings_list for result in query_dictionary_file_lines])\n self.result = result_postings_list\n print(\"Found {} matching documents\".format(len(result_postings_list)))", "def get_data():\n log = common.LogFile('', LOGFILE)\n settings = load_settings()\n keywords = settings[\"keywords\"]\n api_key = settings[\"api_key\"]\n for keyword in keywords:\n print(\"[{}] : fetching data.\".format(keyword))\n filename = \"results_{0}.json\".format(keyword)\n results = {}\n hits_limit = 500\n start_at = 1\n counter = 0\n while True:\n url = create_url(keyword, hits_limit, start_at, api_key)\n records = get_records_from_url(url)\n total_results = get_total_hits(records)\n records = split_records(records)\n records_on_page = len(records)\n if records_on_page == 0:\n break\n else:\n for record in records:\n counter += 1\n id_no = extract_id_number(record)\n processed_dict = {'ID': id_no, 'problem': []}\n processed_record = parse_record(\n record, processed_dict, log)\n if id_no not in results:\n results[id_no] = processed_record\n if counter % 100 == 0:\n print(\"Processed {} out of {}\".format(\n counter, total_results))\n start_at += hits_limit\n time.sleep(THROTTLE)\n print(\"[{}] : fetched {} records to {}.\".format(\n keyword, len(results), filename))\n save_data(results, filename)", "def setupQuery(self, file):\n file.write(\"QUERY(FALSE);\\n\")\n file.write(\"COUNTEREXAMPLE;\\n\")\n return", "def run_search(dict_file, postings_file, queries_file, results_file):\n print('running search 
on the queries...')\n\n with open(dict_file, mode=\"rb\") as dictionary_file,\\\n open(postings_file, mode=\"rb\") as posting_file,\\\n open(queries_file, encoding=\"utf8\") as q_in,\\\n open(results_file, mode=\"w\", encoding=\"utf8\") as q_out:\n\n ''' load dictionary and postings '''\n # dict(k,v) -> token, Entry(frequency, offset, size)\n # postings -> the dict containing the entries and metadata of the postings file\n # skiplist -> list of all doc IDs\n dictionary = pickle.load(dictionary_file)\n postings = Posting(dictionary, posting_file)\n file_list = postings['__all__']\n\n ''' process query, and write the query result to result file '''\n for query in q_in:\n query = preprocess_query(query)\n algebra = boolean.BooleanAlgebra()\n # Simplify query, e.g. tautology\n expression = algebra.parse(query, simplify=True)\n # special cases after simplification\n if str(expression) == \"0\":\n print(\"\", end='\\n', file=q_out)\n continue\n elif str(expression) == \"1\":\n print(\" \".join(map(str, file_list)), end='\\n', file=q_out)\n continue\n\n print(\" \".join(map(str, shunting(get_input(str(expression))).eval(\n postings, file_list).list)), end='\\n', file=q_out)\n\n # add posting skiplist and list of all docIDs to corresponding symbol\n # for sym in expression.symbols:\n # if normalize(sym) == \"IGNORE\":\n # norm_sym = str(normalize(sym))\n # setattr(sym, \"obj\", norm_sym)\n # setattr(sym, \"skiplist\", postings[norm_sym])\n # setattr(sym, \"list\", postings[norm_sym].list)\n # setattr(sym, \"file_list\", file_list.list)\n\n # evaluate the query\n # args[]: list of sub-terms\n # For symbols and base elements this tuple is empty,\n # for boolean functions it contains one or more symbols, elements or sub-expressions.\n # print(\" \".join(map(str, expression.evaluate_query(expression.args).list)),\n # end='\\n', file=q_out)", "def run_query(self):\n query_dictionary_file_lines = self.get_dictionary_file_lines_for_keywords()\n result_postings_list = intersect_lists([result.postings_list for result in query_dictionary_file_lines])\n self.result = result_postings_list\n print(\"Found {} matching documents\".format(len(result_postings_list)))", "def run_queries(q, file): \n data = csv(cd(file)) # modified to point to Data dir.\n seen = set(col(0, data))\n \n for q in reversed(q):\n for t in twitter(q):\n if t.id not in seen:\n data.append((\n t.id,\n t.author,\n t.language,\n t.text,\n t.date,\n t.likes,\n ))\n seen.add(t.id)\n\n data.save()", "def query_file(self):\n print(\"Start sending Query requests of av after AV upload for file {}\".format(self.file_name))\n request = copy.deepcopy(self.request_template)\n request['request'][0]['md5'] = self.md5\n data = json.dumps(request)\n response_j = json.loads('{}')\n status_label = False\n retry_no = 0\n while (not status_label) or (status_label == \"NOT_FOUND\"):\n print(\"Sending Query request for av for file {}\".format(self.file_name))\n response = requests.post(url=self.url + \"query\", data=data, verify=False)\n response_j = response.json()\n status_label = response_j['response'][0]['status']['label']\n if status_label != \"NOT_FOUND\":\n break\n print(\"av Query response status for file {} is still pending\".format(self.file_name))\n time.sleep(SECONDS_TO_WAIT)\n retry_no += 1\n if retry_no == MAX_RETRIES:\n print(\"Reached query max retries. 
Stop waiting for av results for file {}\".format(self.file_name))\n break\n return response_j", "def run(self):\n\n # TODO: Logic to get data, enforce request limits, and filter out duplicates", "def getResults():", "def savequerydata(searchcriteria, filename='', searchlimit=10):\n\n searchcriteria = searchWoKTools.updatesc(searchcriteria)\n\n i = 0\n querydata = []\n try:\n with open(os.path.join(os.getcwd(), 'results', 'searchWoKResults', filename + 'queryData.txt'), 'wt') as output:\n for criterion in searchcriteria:\n i += 1\n query = criterion['material']\n print('Searching for ' + query + '... (' + str(i) + '/' + str(len(searchcriteria)) + ')')\n\n searchparam = ''\n for n in range(len(query.split(', '))):\n searchparam += 'topic:' + query.split(', ')[n]\n if (criterion['numQueries'] > 1) and (n < (len(query.split(', ')) - 1)):\n searchparam += ' or '\n searchdata = searchWoKTools.getsearchdata(searchparam, searchlimit)\n searchdata[0].update(criterion)\n querydata.append(searchdata)\n\n json.dump(querydata, output)\n except Exception as inst:\n print(type(inst))\n return querydata", "def write_results(file, reqs_to_get, output_file, t_min=None, t_max=None): #pylint: disable=redefined-outer-name\n # Initialize a dictionary for the results \n res_dict = {}\n \n # Load Results \n if os.path.exists(os.path.splitext(file)[0] + '.adm'):\n Adams.execute_cmd('file adams_data_set read file_name=\"{}\"'.format(os.path.splitext(file)[0] + '.adm'))\n Adams.execute_cmd('file analysis read file_name=\"{}\" model_name={}'.format(file, os.path.splitext(os.path.split(file)[-1])[0]))\n else:\n Adams.execute_cmd('file analysis read file_name=\"{}\"'.format(file))\n\n # Get the model object\n mod = Adams.Models.get([mn for mn in Adams.Models][0])\n \n # Get the analysis object\n ans = mod.Analyses.get([an for an in mod.Analyses][0])\n\n time = ans.results.get('TIME').values\n _time_np = np.asarray(time)\n i_min = 0 if t_min is None else np.argmax(_time_np >= t_min)\n i_max = len(time)-1 if t_max is None else np.argmax(_time_np >= t_max)\n\n # Store the time values\n res_dict['time'] = time[i_min:i_max] \n\n # Loop over the requsted results\n for res_name, res_comps in reqs_to_get.items():\n print(f'res_comps={res_comps}')\n \n # Get the result object handle\n res = ans.results.get(res_name) if res_name in ans.results.keys() else get_req_from_comment(res_name, ans)\n\n for res_comp in res_comps:\n\n print(f'res_comp={res_comp}')\n print(f'req = {res}')\n \n values = res.get(res_comp).values[i_min:i_max]\n res_dict[f'{res_name}_{res_comp}'] = values\n \n # Write to a csv file\n with open(output_file, 'w', newline='') as outfile:\n writer = csv.writer(outfile)\n writer.writerow(res_dict.keys())\n writer.writerows(zip(*res_dict.values()))", "def execute_event(self):\n try:\n with open(self._import_path_input.get(), 'r') as \\\n raw_data_file, \\\n open(self._export_path_input.get(),\n 'w', newline='') as processed_data_file:\n\n # all situation counter\n count_true = 0\n count_false = 0\n count_tag = 0\n count_not_found = 0\n\n # get the user input api(url)\n original_url = self._url_input.get()\n\n # true/false and tag/NotFound flags\n flag_true_false = False\n flag_tag_notfound = False\n\n # set the flag\n temp_line = raw_data_file.readline().strip().strip('\\r\\n')\n temp_clean_url = original_url[:-8] + temp_line\n temp_response = self.api_dealer(temp_clean_url)\n if temp_response == 't' or temp_response == 'f':\n flag_true_false = True\n else:\n flag_tag_notfound = True\n\n # process the file\n for 
line in raw_data_file:\n clean_line = line.strip().strip('\\r\\n')\n if clean_line == '':\n tk.messagebox.showinfo('info', 'end of file or '\n 'unexpected newline in '\n 'the end')\n break\n # get rid of the '<target>' ending of the original url\n # and combine with the real target\n clean_url = original_url[:-8] + clean_line\n response = self.api_dealer(clean_url)\n\n # deal with different kinds of output and update the counter\n if response == 't':\n count_true += 1\n elif response == 'f':\n count_false += 1\n elif response == 'Not Found':\n count_not_found += 1\n else:\n count_tag += 1\n\n # create the output row and write to file\n output_row = clean_line + ' ' + response + '\\r\\n'\n processed_data_file.write(output_row)\n\n # now output the ratio\n if flag_true_false:\n self._ratio_var.set(str(count_true) + '/' + str(\n count_true + count_false))\n elif flag_tag_notfound:\n self._ratio_var.set(str(count_tag) + '/' + str(count_tag\n + count_not_found))\n\n tk.messagebox.showinfo('message', 'job done! have a nice day!')\n\n except Exception as e:\n tk.messagebox.showerror('error', e)", "def test_get_results(self):\n pass", "def run_search(dict_file, postings_file, queries_file, results_file):\n print('running search on the queries...')\n\n # Configuration Parameter Settings\n global_config = {\n # Weights assigned to term in each zone\n 'zones': {\n \"use_zones\": True,\n \"weights\": {\n 'title': 0.4,\n 'content': 0.4,\n 'date_posted':0.1,\n 'court': 0.1\n }\n },\n\n # Rocchio Query Refinement Configuration Settings\n 'rocchio': {\n \"use_rocchio\": True,\n \"rocchio_alpha\": 1,\n \"rocchio_beta\": 0.2\n },\n\n # Wordnet Query Expansion Configuration Settings\n 'wordnet': {\n \"use_wordnet\": True,\n \"word_limit\": 10,\n \"weight\": 0.1\n }\n }\n\n # Initialise stemmer\n stemmer = PorterStemmer()\n\n # Open and load dictionary\n # Sorted index dictionary is {term : [termID,docFrequency,charOffSet,strLength]}\n # Document length dictionary is {docID: cosine normalized document length}\n # Relevant Documents dictionary is {docID: [relevant document vector]}\n # Collection size is the total number of documents, to be used for idf calculation\n in_dict = open(dict_file, 'rb')\n sorted_dict = pickle.load(in_dict)\n sorted_index_dict = sorted_dict[0]\n docLengths_dict = sorted_dict[1]\n relevantDocs_dict = sorted_dict[2]\n collection_size = sorted_dict[3]\n\n # Open posting lists, but not loaded into memory\n postings = open(postings_file, 'r')\n\n queries = []\n queries_groundtruth_docs_list = []\n\n # Open input queries file\n with open(queries_file,'r') as f:\n query = (f.readline()).strip()\n queries.append(query)\n\n query_groundtruth_docs = f.readlines()\n query_groundtruth_docs = [int(x.strip()) for x in query_groundtruth_docs]\n queries_groundtruth_docs_list.append(query_groundtruth_docs)\n\n # Process each query and store the results in a list\n query_results = []\n for query_index, query in enumerate(queries):\n\n # List of relevant docIds as labeled for the query\n query_groundtruth_docs = queries_groundtruth_docs_list[query_index]\n\n # Store all normalized query tf-idf weights in query_dict\n query_dict = process_query(\n query, sorted_index_dict, collection_size, stemmer, global_config, query_groundtruth_docs, relevantDocs_dict)\n\n # Store all document tf weights in document_dict\n document_dict = process_documents(\n query_dict, sorted_index_dict, postings)\n\n # Generates the relevant documents for the query\n scores = process_scores(\n query_dict, document_dict, 
docLengths_dict)\n\n query_results.append(scores)\n\n # Write query results into output results_file\n with open(results_file, 'w') as results_file:\n for result in query_results:\n result_line = \"\"\n # If result is empty, just write an empty line\n # If result is not empty, write each documentID (starting from highest rank) with a whitespace separating each documentID\n if result is not None:\n for docID, score in result:\n result_line = result_line + str(docID) + ' '\n # Remove final whitespace in results line\n results_file.write(result_line[:-1])\n # Ensure final result does not print new line in results file\n if result != query_results[-1]:\n results_file.write('\\n')\n print('done!')", "def search(self, request):\n file = self.request.data['file']\n with open(file) as f:\n content = f.readlines()\n content = [x.strip() for x in content]\n # creating a new file output.txt for storing results.\n output = open('Output.txt', 'w+')\n # write all matching results while iterating over queries item\n for query in content:\n output.write(\"Matches for: %s\\n\" % query)\n # filtering out all the exact matches from phone-book in sorted manner by first name.\n matches = self.queryset.filter(last_name__icontains=query).order_by('first_name')\n if not matches:\n output.write(\"No results found\")\n else:\n for count, result in enumerate(matches):\n output.write(\"Result {0}: {1}, {2}, {3}, {4}\\n\".format(\n count, result.last_name, result.first_name, result.state, result.phone_number\n ))\n # closing file after final iteration of all queries in a file.\n output.close()\n file_handle = output.open()\n response = FileResponse(file_handle, content_type='text/plain')\n response['Content-Length'] = file_handle.size\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % file_handle.name\n return Response(response)", "def automate_search(filename, searchlimit):\n sc = readsearchcriteria(filename)\n\n data = savequerydata(sc, filename, searchlimit)\n\n if data is not None:\n print('saveQueryData Error')\n return data\n\n generate_csv(filename)", "def run_check(output_file,\n documents,\n queries,\n results,\n ext=\".xhtml\",\n formula_bags=False,\n keep_words=True,\n keep_math=True\n ):\n with open(output_file, \"w+\") as out:\n analyzer = Analyzer(formula_bags=formula_bags,\n keep_words=keep_words,\n keep_math=keep_math)\n queries = Queries(queries).get_queries()\n results = Results(results)\n print(\"{},{},{},{},{},{},{},{},{},{},{}\".format(\"Query\",\n \"Document\",\n \"Doc-Length\",\n \"Ranking\",\n \"Span\",\n \"Min-Span\",\n \"Normalized-Span\",\n \"Normalized-Min-Span\",\n \"Min-Distance\",\n \"Ave-Distance\",\n \"Max-Distance\"),\n file=out)\n undefined_docs = []\n for q in tqdm(range(0, len(queries))):\n query = queries[q]\n for doc in results.documents_for_query(query):\n try:\n document = Document(os.path.join(documents, doc + ext))\n (tf_dic, __) = document.lookup_dictionaries(analyzer)\n relevant = lookup_relevant(results.find_score(query, doc))\n try:\n dist = calculate_distances(query, tf_dic)\n doc_length = sum([len(tf_dic[key])\n for key in tf_dic.keys()])\n print(\"{},{},{},{},{}\".format(query,\n document,\n doc_length,\n relevant,\n \",\".join([str(d)\n for d in dist])),\n file=out)\n except DistancesUndefinedException:\n undefined_docs.append((document, relevant, query))\n except FileNotFoundError:\n print(\"Error in opening document: {}\".format(doc))\n print(\"Documents with undefined Distances\")\n for doc in undefined_docs:\n print(\"{}:{}:{}\".format(doc[2], 
doc[0], doc[1]))", "def run_queries_file(self, file_path, semantic_flag, city_choice,stem, result_path=\"\"):\n api = datamuse.Datamuse()\n\n with open(file_path , \"r\") as q:\n queries = dict()\n queries_list = q.read().split(\"</top>\")\n for query in queries_list:\n if query == \"\":\n continue\n tmp = query.split(\"<title>\")\n query_number= tmp[0].split(':')[1].replace('\\n','').replace(' ','')\n tmp= tmp[1].split(\"<desc>\")\n query_content =tmp[0].replace('\\n',' ')\n semantic_words = \"\"\n # ask from api the synonyms of each word on query\n if semantic_flag:\n for word in query_content.split():\n synonyms = api.suggest(s=word, max=1)\n for item in synonyms:\n if item[\"word\"] != word.lower():\n if item[\"word\"].split()[0] == word.lower():\n item[\"word\"]=item[\"word\"].split()[1]\n semantic_words += \" \" + item[\"word\"]\n #add the synonyms into query content\n query_content += semantic_words\n #add the description into query content\n queries[query_number] = self.remove_stop_words(query_content+tmp[1].split(\"<narr>\")[0][12:].replace('\\n',' '))\n\n p = Parse.Parser(self.stop_words)\n searcher = Searcher(queries, self.term_dictionary if not stem else self.term_dictionary_with_stemming, self.documents, self.avgl, self.posting_and_dictionary_path,p)\n\n results = searcher.run(city_choice,stem)\n #write the results to disk\n if result_path != \"\":\n self.write_results_to_disk(result_path,results)\n return results", "def query(self):", "def step010():\n logger.logMessage('Begin: Getting candidate documents from elasticsearch')\n\n def limitHour(d):\n thish = d.start_time.tz_localize(tz='UTC')\n nexth = thish + dt.timedelta(hours=1)\n return { 'range': { 'time': {'gte':thish, 'lt':nexth } } }\n \n conn = sql.create_engine(pgurl)\n client = es.Elasticsearch(hostlist)\n dupesDF = pd.read_sql_table('weather_dupes',conn).set_index('time')\n hours =dupesDF.to_period('H').reset_index()['time'].unique()\n ranges = [ limitHour(h) for h in hours ]\n query = { \n '_source': [ 'tsa','time' ],\n 'query': { \n 'bool': { 'should': ranges } \n } \n }\n #logger.logMessage(level='DEBUG',message='Query body: {0}'.format(query))\n hits = eshelp.scan(client=client,index=indexName,doc_type='doc',query=query)\n numRecs = 0\n with open(candidatesFile,'w') as f:\n for h in hits:\n src = h['_source']\n tsa = int(src['tsa'])\n time = src['time']\n docid = h['_id']\n idx = h['_index']\n f.write(f'{tsa:014d};{time:25s};{docid:32s};{idx:32s}\\n') \n numRecs += 1\n if numRecs % 1000 == 0:\n logger.logMessage(level='DEBUG',message=\"{0:9d} records written\".format(numRecs))\n logger.logMessage(message=\"{0:9d} total records written\".format(numRecs))\n logger.logMessage('End: Getting candidate documents from elasticsearch')", "def fileCheckOriginal():\n\n print('[+] Populating File Hasing for later check')\n for url in check_files:\n try:\n data = query(url)\n file_name = url.split(\"/\")[-1]\n _,tmp_file = tempfile.mkstemp(prefix=\"exitmap_%s_\" % file_name)\n\n with open(tmp_file, \"wb\") as fd:\n fd.write(data)\n print('[+] Saving File \\\"%s\\\".' % tmp_file)\n check_files_patch_results.append( File_Check_Results(url, file_name, tmp_file, \"NO\", sha512_file(tmp_file)) )\n print('[+] First Time we see the file..')\n print(' |_________> exitnode : None' )\n print(' |_________> :url: %s' % str(url) )\n print(' |_________> :filePath: %s' % str(tmp_file))\n print(' |_________> :file Hash: %s' % str(sha512_file(tmp_file)))\n except Exception as err:\n print('[-] Error ! 
%s' % err)\n traceback.print_exc()\n pass\n return time.time()", "def query3() :", "def main():\n\n global final_dictionary\n global final_doc_set\n\n input_query = input(\"Please enter query for search: \")\n\n # Retrieving positional inverted index for query terms\n final_dictionary = fetch_dictionary(input_query.lower()) # Query is converted to lowercase as pre-process step\n\n #The final set of document IDs is retrieved below\n fetch_posting_list(input_query)\n sc = tf_idf_score()\n output = fetch_document_contents(input_query, sc)\n print(output)\n output_file = open(RESULT_FILE, 'a')\n output_file.write(output)\n output_file.write('\\n##############################################################\\n')\n output_file.close()\n\n print(\"Query results also appended to file: {0}\".format(RESULT_FILE))", "def query_server(query_list, run_id, url):\n out_str = \"\"\n for query in query_list:\n payload = {\n 'runID': run_id,\n 'TextID': query[0],\n 'Text': query[1]\n }\n resp = requests.post(url, data=payload)\n if resp.status_code != 200:\n print \"Breaking due to non-200 response\"\n break\n out_str += resp.content\n \n f_path = os.path.join(\"evaluation/system_output\", run_id + \".tsv\")\n abs_path = os.path.abspath(f_path)\n with open(abs_path, 'w') as f:\n f.write(out_str[1:]) # strip initial newline\n \n return abs_path", "def run_search(dict_file, postings_file, queries_file, results_file):\n print('running search on the queries...')\n\n with open(dict_file, mode=\"rb\") as dictionary_file,\\\n open(postings_file, mode=\"rb\") as posting_file,\\\n open(queries_file, encoding=\"utf8\") as q_in,\\\n open(results_file, mode=\"w\", encoding=\"utf8\") as q_out:\n\n ''' \n load dictionary and postings \n - num_of_doc -> The number of the documents indexed\n - dict(k,v) -> token, Enftry(frequency, offset, size)\n - postings -> list of tuples (doc ID, token frequency)\n '''\n num_of_doc = pickle.load(dictionary_file)\n dictionary = pickle.load(dictionary_file)\n postings = Posting(dictionary, posting_file)\n\n ''' \n process query, and write the query result (i.e., the 10 \n most relevant doc IDs) to the result file \n '''\n for query in q_in:\n print(*find_10_most_relevant(query, dictionary,\n postings, num_of_doc), end='\\n', file=q_out)", "def _run_query(self):", "async def _request_one(self, url, header, id, index, session):\n async with session.get(url=url, headers=header) as resp:\n status = resp.status\n if status == '200':\n self.logger.info(\"[%s] Request for url %s, header: %s\", index, url, header)\n result = await resp.text()\n\n # successful\n self.logger.info(\"[%s] [Successful] Request Success for url %s\", index, url)\n if id not in self.processed_ids:\n self.processed_ids.add(id)\n\n async with aiofiles.open(self.result_file_dir + id + \".txt\", \"w\") as f:\n await f.write(result)\n self.logger.info(\"[%s] Wrote results for source URL: %s\",index, url)\n else:\n self.logger.info(\"[%s] [ERROR] Request error for url %s, status %s\", index, url, resp.status)\n if resp == '429':\n time.sleep(1000)", "def search_for_adaptation():\n\n book_id = 0\n # variables for status results; 0 for no error, 1 for no book found, 2 for no movie found,\n # 3 for no tv show found, 4 for no tv show and movie found\n status_msg = \"\"\n status_num = 0\n\n # if the Random Book button is chosen, then select a random book from the list\n # try to match the book with a movie or tv show until one is found\n if request.args.get('random') == \"1\":\n search_term = data_functions.get_random_book()\n 
else:\n # if search input is used, then get the search term\n search_term = request.form['search'] # get search term from input box\n\n # Goodreads API functions\n gr_result = API_functions.request_book(search_term) # use function in API_functions.py\n\n # if no book is found, generate status code\n if gr_result[\"total\"] == 0:\n status_msg = \"No matching book found for {0}. Try another.\".format(search_term)\n status_num = 1\n\n # TheMovieDB functions\n movie_result = {} # empty dictionary\n tv_result = {} # empty dictionary\n if status_num == 0: # only continue if there is a book found\n # search for movie\n # use function in API_functions.py\n movie_result = API_functions.request_movie(gr_result[\"name_split\"], gr_result[\"author_name_clean\"], 0)\n\n if movie_result[\"total_results\"] != 0: # if a movie is found, save some of its data\n movie_id = movie_result[\"id\"] # save movie ID\n\n else: # if no movie is found, generate status message\n status_msg = \"No movie found. Try another.\"\n status_num = 2\n\n # search for TV show\n # use function in API_functions.py\n tv_result = API_functions.request_tv_show(gr_result[\"name_split\"], gr_result[\"author_name_clean\"], 0)\n\n if tv_result[\"total_results\"] != 0: # if a tv show is found, save some of its data\n tv_id = tv_result[\"id\"] # save tv ID\n\n else: # if no tv show is found, generate status message\n status_msg = \"No TV Show found. Try another.\"\n status_num = 3\n\n if movie_result[\"total_results\"] == 0 and tv_result[\"total_results\"] == 0:\n # if no movie and tv show found, generate status message.\n # in the case they are found, but not based on the book, generate the same message\n status_msg = \"No adaptation found for {0}. Try another.\".format(search_term)\n status_num = 4\n\n if previous_searches.count(\n gr_result[\"name_split\"]) == 0 and status_num != 4: # only add if book name is not in deque\n if len(previous_searches) == 5: # keep the deque at only five most recent searches\n previous_searches.pop() # remove one if there is already five\n previous_searches.appendleft(gr_result[\"name_split\"]) # add recent search to beginning of deque\n # render the page again with updated information, pass all data to render_template method\n return render_template(\"index.html\", book_id=book_id, book_data=gr_result, movie_data=movie_result,\n tv_data=tv_result, app_name=app_name, search=search_term, status_msg=status_msg,\n status_num=status_num, previous_searches=previous_searches)", "def extract():\n queries = querylist_builder()\n \n pathlib.Path('/tmp/street_data').mkdir(parents=True, exist_ok=True) \n for i,q in enumerate(queries):\n print(\"running extract query\")\n url = ENDPOINT + \"?CommandData=\" + q\n print(url)\n r = requests.get(url)\n text_file = open(\"/tmp/street_data/\" + str(i) + \".xml\", 'w')\n data = r.text\n print(data)\n text_file.write(data) \n print(\"data saved for {}\".format(str(i)))\n text_file.close()", "def searchNameAlldb():\n if(OptionConfiguration.methodSentData==\"GET\"):\n #print(\"ci\")\n numeroRigheDb=countValueofTableGet(informationSchema.tabellaDB,None)\n print (\"\")\n #print (numeroRigheDb)\n if(numeroRigheDb!=None):\n print \"Num of DB -> \"+str(numeroRigheDb)\n print (\"\")\n valori=searchValueofTableGet(numeroRigheDb,informationSchema.tabellaDB,informationSchema.colonnaNomeDb,None)\n #print (valori)\n if(valori!=None and len(valori)>0):\n\n fileWrite = open(\"DBname.txt\", 'w')\n for value in valori:\n fileWrite.write(str(value)+'\\n')\n fileWrite.close()\n #print 
(numeroRigheDb)\n print(OptionConfiguration.bcolors.BOLD+\"Value write on DBname.txt\"+OptionConfiguration.bcolors.ENDC)\n\n else:\n print(OptionConfiguration.bcolors.BOLD + OptionConfiguration.bcolors.FAIL + \"Error nothing value found\" + OptionConfiguration.bcolors.ENDC)\n else:\n print(OptionConfiguration.bcolors.BOLD+\"Value write on \"+OptionConfiguration.destination[0]+\"/DBname.txt\"+OptionConfiguration.bcolors.ENDC)\n\n elif(OptionConfiguration.methodSentData==\"POST\"):\n numeroRigheDb=countValueofTablePost(informationSchema.tabellaDB,None)\n #print (numeroRigheDb)\n print (\"\")\n if (numeroRigheDb!=None):\n print (\"Num of DB -> \"+ str(numeroRigheDb))\n print (\"\")\n valori=searchValueofTablePost(numeroRigheDb,informationSchema.tabellaDB,informationSchema.colonnaNomeDb,None)\n print (valori)\n if(valori!=None):\n\n fileWrite = open(\"DBname.txt\", 'w')\n for value in valori:\n fileWrite.write(str(value)+'\\n')\n fileWrite.close()\n #print (numeroRigheDb)\n print(OptionConfiguration.bcolors.BOLD+\"Value write on DBname.txt\"+OptionConfiguration.bcolors.ENDC)\n\n else:\n print(OptionConfiguration.bcolors.BOLD + OptionConfiguration.bcolors.FAIL + \"Error nothing value found\" + OptionConfiguration.bcolors.ENDC)\n else:\n print(OptionConfiguration.bcolors.BOLD+OptionConfiguration.bcolors.FAIL+\"Error num of rows not found\"+OptionConfiguration.bcolors.ENDC)", "def main():\n\n # generate a token, we will be sending several queries off\n token = gen_token()\n # build the query string\n s_d, e_d = prev_quarter_boundaries(datetime.datetime.utcnow())\n s_str = s_d.strftime(\"%Y-%m-%d\")\n e_str = e_d.strftime(\"%Y-%m-%d\")\n query_str = (\n 'filingsource:\"Taiwan TWSE\" AND ' +\n 'enddate:[' + s_str + ' TO ' +\n e_str + ']'\n )\n # pull docs\n docs_res = documents_stringquery(query_str, False, token=token)\n # read out number of hits\n num_filings = docs_res['totalHits']\n # print it out\n print('Filing count from last quarter: ' + str(num_filings))", "def query(self):\r\n reports = self.get_relevant_reports()\r\n new_files = self.construct_report_dict(reports)\r\n updated, new_reports = self.is_updated(new_files, self.old_files)\r\n if len(self.old_files) != 0 and updated:\r\n self.process_changes(new_reports)\r\n self.old_files = new_files", "def handle_file(self):\n query_cache_response = self.check_av_cache()\n cache_status_label = query_cache_response['response'][0]['status']['label']\n if cache_status_label == \"FOUND\":\n print(\"Results already exist in AV cache for file {}\".format(self.file_name))\n self.final_response = query_cache_response\n self.final_status_label = cache_status_label\n else:\n print(\"No results in AV cache before upload for file {}\".format(self.file_name))\n upload_response = self.upload_file()\n upload_status_label = upload_response[\"response\"][0][\"status\"][\"label\"]\n if upload_status_label == \"UPLOAD_SUCCESS\":\n query_response = self.query_file()\n query_status_label = query_response[\"response\"][0][\"status\"][\"label\"]\n print(\"Receiving Query response with av results for file {}. 
status: {}\".format(self.file_name,\n query_status_label))\n self.final_response = query_response\n self.final_status_label = query_status_label\n else:\n self.final_response = upload_response\n self.final_status_label = upload_status_label\n self.create_response_info(self.final_response)\n if self.final_status_label == \"FOUND\":\n signature = self.final_response[\"response\"][0][\"av\"][\"malware_info\"][\"signature_name\"]\n if signature:\n print(\"File {} was found malicious by AV. Signature : {}\".format(self.file_name, signature))\n else:\n print(\"File {} was found clean by AV\".format(self.file_name))", "def collect(self, query_list):\n try:\n while True:\n # For each app, we iterate through the query list and make the API request.\n for current_app in range(self.number_apps):\n for q_index, current_query in enumerate(query_list):\n response = self.request_data(search_query=current_query, app_index=current_app)\n if utils.validate_response(response):\n # STORE DATA\n data = response.json()\n print(data.keys())\n #pass\n else:\n raise BadResponseError()\n except KeyboardInterrupt:\n return -1\n except BadResponseError:\n return -1", "def check_results(request):\n \n # Check if an ID was supplied.\n if ('ID' not in request.GET):\n response = HttpResponse()\n response.status_code = 400 # Bad Request\n response.reason_phrase = (\"No ID was passed. The ID used to start \"\n \"the classification job must be sent to \"\n \"check the progress. The ID should be \"\n \"passed in a parameter named 'ID'.\")\n return response\n \n # Ensure a file exists with the specified ID.\n id = request.GET['ID']\n if (not File.objects.filter(file_name=id).exists()):\n response = HttpResponse()\n response.status_code = 400 # Bad Request\n response.reason_phrase = ('The passed ID was invalid. If the ID you '\n 'sent was returned by a validate request, '\n 'it is possible the ID has expired and the '\n 'job was deleted.')\n \n # Retrieve the job for the requested file.\n file = File.objects.get(file_name=id)\n job = file.job\n \n # If the job is complete, send the results. Otherwise, send all of the\n # updates for the job.\n has_result = JobResult.objects.filter(job=job).exists()\n return job_results(request, job) if has_result else \\\n job_updates(request, job)", "def getTestResults():", "def test_output_results(self, mock_output_results_to_db):\n self.scanner._output_results(fasd.AUDIT_LOGGING_VIOLATIONS)\n\n mock_output_results_to_db.assert_called_once_with(\n self.scanner, fasd.FLATTENED_AUDIT_LOGGING_VIOLATIONS)", "def query(self, query):", "def run(self):\n\t\tlog = logging.getLogger()\n\t\tsuccess = True\n\t\tself.task[\"custom\"] = str(self.task[\"custom\"])\n\t\tself.db = CuckooDatabase()\n\n\t\t# Generate analysis results storage folder path with current task id.\n\t\tresults_path = CuckooConfig().get_analysis_results_path()\n\t\tsave_path = os.path.join(results_path, str(self.task[\"id\"]))\n\n\t\tif (self.task[\"custom\"] == \"sleep\"):\n\t\t\timport time\n\t\t\t# sleep longer than default timeout of hsn2-cuckoo\n\t\t\ttime.sleep(905)\n\t\t# Additional check to verify that the are not saved results with the\n\t\t# same task ID.\n\t\tif os.path.exists(save_path):\n\t\t\tlog.error(\"There are already stored results for current task \" \\\n\t\t\t\t\t \"with ID %d at path \\\"%s\\\". 
Abort.\"\n\t\t\t\t\t % (self.task[\"id\"], save_path))\n\t\t\tself.db.complete(self.task[\"id\"], False)\n\t\t\treturn False\n\n\t\t# Check if target file exists.\n\t\tlog.debug(os.path.exists(self.task[\"custom\"]))\n\t\tif not os.path.exists(self.task[\"custom\"]):\n\t\t\tlog.error(\"Cannot find custom file \\\"%s\\\". Abort.\"\n\t\t\t\t\t % self.task[\"custom\"])\n\t\t\tself.db.complete(self.task[\"id\"], False)\n\t\t\treturn False\n\n\t\t# Check if target is a directory.\n\t\tif os.path.isdir(self.task[\"custom\"]):\n\t\t\tlog.error(\"Specified target \\\"%s\\\" is a directory. Abort.\"\n\t\t\t\t\t % self.task[\"custom\"])\n\t\t\tself.db.complete(self.task[\"id\"], False)\n\t\t\treturn False\n\n\t\t# 4. Extract appropriate log archive as mock logs analysis results\n\t\t# Modified _save_results so that it extracts the tar file passed in target\n\t\tself._save_results(self.task[\"custom\"], save_path)\n\n\t\t# 5. Update task in database with proper status code.\n\t\tif success:\n\t\t\tself.db.complete(self.task[\"id\"], True)\n\t\telse:\n\t\t\tself.db.complete(self.task[\"id\"], False)\n\t\tlog.info(\"Analyis completed.\")\n\n\t\treturn True", "def execute(self, req):\n\t\tmyPath = req[\"url\"].replace(self.settings[\"ns\"][\"local\"], \"\", 1).split(\"/\")\n\t\tfile = myPath.pop(0)\n\t\tcurrentDir = getcwd()\n\t\tservice = self.basedir + file\n\t\turi = req[\"url\"]\n\t\tqueryPath = \"%s/queries/\" % service\n\t\ttemplatePath = \"%s/\" % service\n\t\ttemplateName = self.mime.getExtension(req[\"request\"].accept_mimetypes.best)\n\t\ttry:\n\t\t\tonlyfiles = [f for f in listdir(queryPath) if isfile(join(queryPath, f))]\n\t\texcept OSError:\n\t\t\tprint \"Warning: Can't find path %s for queries.\" % templatePath\n\t\t\tonlyfiles = []\n\t\tqueries = {}\n\t\tfirst={}\n\t\tfor root, dirs, files in walk(queryPath):\n\t\t\tfor filename in files:\n\t\t\t\ttry:\n\t\t\t\t\tcurrentEndpoint = \"local\"\n\t\t\t\t\t_aux = root.rstrip(\"/\").split(\"/\").pop()\n\t\t\t\t\tif _aux != \"queries\":\n\t\t\t\t\t\tcurrentEndpoint = _aux\n\t\t\t\t\tif not filename.endswith(\".query\"):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tsqlQuery = self.env.get_template(\"%s/%s\" % (root, filename))\n\t\t\t\t\trenderedSqlQuery = sqlQuery.render(queries=queries, first=first, uri=uri, session=session, flod=self.flod, args=myPath)\n\t\t\t\t\tif re.match(\"^\\s*select\", renderedSqlQuery, flags=re.IGNORECASE) is None:\n\t\t\t\t\t\treturn {\"content\": \"Not a valid SQL Select query\", \"status\": 500}\n\t\t\t\t\tresults = self.sqlserver.query(renderedSqlQuery, currentEndpoint)\n\t\t\t\t\t_name = filename.replace(\".query\", \"\")\n\t\t\t\t\tqueries[_name] = []\n\t\t\t\t\tif results is not None:\n\t\t\t\t\t\tqueries[_name] = results\n\n\t\t\t\texcept Exception, ex:\n\t\t\t\t\tprint sys.exc_info()\n\t\t\t\t\tprint ex\n\t\t\t\t\treturn {\"content\": \"A problem with the SQL endpoint occurred\", \"status\": 500}\n\t\tchdir(currentDir)\n\t\ttry:\n\t\t\tif templateName == \"json\" and not isfile( \"%s%s.template\" % (templatePath, templateName)):\n\t\t\t\tout = json.dumps(queries)\n\t\t\telse:\n\t\t\t\tcontent = self.env.get_template(\"%s%s.template\" % (templatePath, templateName))\n\t\t\t\tout = content.render(queries=queries, uri=uri, session=session, flod=self.flod, args=myPath)\n\t\texcept Exception:\n\t\t\tprint sys.exc_info()\n\t\t\treturn {\"content\": \"Rendering problems\" , \"status\": 500}\n\t\treturn {\"content\": out, \"mimetype\": \"text/html\"}", "def search_results():\n skip = int(flask.request.args.get(\"skip\", \"0\"))\n 
limit = int(flask.request.args.get(\"limit\", \"20\"))\n\n obj = {}\n\n # query : will be event kit in case of triage information\n uidstr = flask.request.args.get(\"query\", None)\n\n if uidstr == None:\n obj[\"error\"] = \"Missing search ID\"\n\n uidstr = json.loads(uidstr)\n\n obj[\"query\"] = {}\n obj[\"query\"][\"uid\"] = uidstr\n obj[\"clips\"] = []\n states = backend.get_search_sessions()\n obj[\"sessions\"] = []\n for astate in states:\n obj[\"sessions\"].append(str(astate))\n try:\n uid = uuid.UUID(uidstr)\n state = backend.get_iqr_search_state(uid)\n # use the uid of the state and get the information from the database\n col = str(state.uuid)\n obj[\"collection\"] = col\n searchdb[col].ensure_index([(\"model_id\", pymongo.ASCENDING),(\"probability\", pymongo.DESCENDING) ])\n # Force probabilities\n obj[\"positives\"] = list(state.positives)\n obj[\"negatives\"] = list(state.negatives)\n log = \"\"\n for id in state.positives:\n # log = log + \"Found %d\"%(searchdb[col].find({\"model_id\" : \"FUSION\", \"clip_id\" : id}).count()) + \", \"\n # res = searchdb[col].update({\"model_id\" : \"FUSION\", \"clip_id\" : id}, {\"$set\" : { \"probability\" : 1.0}})\n # log = log + \"Done %d\"%id + \", \"\n news = searchdb[col].find_one({\"model_id\" : \"FUSION\", \"clip_id\" : id})\n news[\"probability\"] = 1.0001\n searchdb[col].save(news)\n log = log + \"Now : \" + str(news)\n\n\n for id in state.negatives:\n # log = log + \"Found %d\"%(searchdb[col].find({\"model_id\" : \"FUSION\", \"clip_id\" : id}).count()) + \", \"\n # res = searchdb[col].update({\"model_id\" : \"FUSION\", \"clip_id\" : id}, {\"$set\" : { \"probability\" : 0.0}})\n # log = log + \"Done %d\"%id + \", \"\n news = searchdb[col].find_one({\"model_id\" : \"FUSION\", \"clip_id\" : id})\n news[\"probability\"] = 0.0\n searchdb[col].save(news)\n log = log + \"Now : \" + str(news)\n\n obj[\"log\"] = log\n\n allres = searchdb[col].find({\"model_id\" : \"FUSION\"}).sort([(\"probability\", pymongo.DESCENDING)]).skip(skip).limit(limit)\n rank = skip + 1\n for one in allres:\n aclip = {}\n aclip[\"score\"] = one[\"probability\"]\n aclip[\"id\"] = \"HVC\" + str(one[\"clip_id\"]).zfill(6)\n clipobj = db[\"clips\"].find_one({\"id\" : \"HVC\" + str(one[\"clip_id\"]).zfill(6)},{\"duration\" : 1})\n aclip[\"duration\"] = clipobj[\"duration\"]\n aclip[\"rank\"] = rank\n rank = rank + 1\n obj[\"clips\"].append(aclip)\n obj[\"count\"] = len(obj[\"clips\"])\n\n except Exception as e:\n obj[\"error\"] = str(type(e)) + \": \" + str(e)\n return jsonify(obj)\n\n obj[\"next\"] = \"http://localhost:5003/iqr/search_results?\" + urllib.urlencode({\"uid\" : uid, \"skip\" : skip+limit } )\n return jsonify(obj)", "def query(self, n_jobs=1) -> str:\n\n def get_one_answer(file):\n return json.dumps(ask_endpoint(file, os.path.join(self.url, \"take_exam\")))\n\n # send each file to the endpoint\n query_start_time = time.time()\n answers = Parallel(n_jobs=n_jobs)(delayed(get_one_answer)(file) for file in tqdm(self.filelist))\n query_end_time = time.time()\n query_time = query_end_time - query_start_time\n\n # put all answers to the dataframe\n answers = pd.DataFrame(answers, columns=[\"prediction\"])\n answers[\"prediction\"] = answers[\"prediction\"].apply(lambda x: json.loads(x))\n answers[\"path\"] = self.filelist\n\n # create report folder\n os.makedirs(self._report_path, exist_ok=False)\n # save raw answers\n answers.to_csv(os.path.join(self._report_path, \"raw_answers.csv\"), index=False)\n # parse answers\n parsed_answers = 
pd.DataFrame(columns=[\"path\",\n \"id\",\n \"prediction\"])\n for _, row in answers.iterrows():\n for k, v in row[\"prediction\"][\"answers\"].items():\n parsed_answers.loc[len(parsed_answers)] = [row[\"path\"], int(k), v]\n # save parsed answers\n parsed_answers = parsed_answers.sort_values(by=[\"path\", \"id\"]).reset_index(drop=True)\n parsed_answers.to_csv(os.path.join(self._report_path, \"parsed_answers.csv\"), index=False)\n # save statistics\n stats = {\n \"readiness_time\": self._readiness_time,\n \"query_total_files\": len(self.filelist),\n \"query_total_time\": query_time,\n \"query_n_jobs\": n_jobs,\n \"query_mean_latency\": query_time / len(self.filelist) * n_jobs,\n \"query_rps\": len(self.filelist) / query_time\n }\n with open(os.path.join(self._report_path, \"stats.json\"), \"w\") as f:\n json.dump(stats, f)\n\n return self._report_path", "def processing():\r\n err=test_url()\r\n of=open(default_output_path,'a+')\r\n if err!=0: \r\n metaf=scan_target_metatag()\r\n adminf=scan_admin_url()\r\n comf=scan_com_content()\r\n robotsf=scan_robots_txt()\r\n htf=scan_htaccess()\r\n moof=scan_mootools()\r\n engbf=scan_engb_ini()\r\n result=compute_result(metaf,adminf,comf,robotsf,htf,moof,engbf)\r\n if result==1:\r\n #print \"THE TARGET IS USING JOOMLA CMS\"\r\n #print \"Joomla version is \" + joomla_version\r\n of.write(\"\\nJOOMLA USED (version : \" + joomla_version + \") --> \" + provided_url + \"\\n\")\r\n else:\r\n #print \"JOOMLA NOT USED\"\r\n of.write(\"\\nJOMLA NOT USED --> \" + provided_url + \"\\n\")\r\n else:\r\n of.write(\"\\nBAD URL --> \" + provided_url + \"\\n\")\r\n of.close()\r\n return 0", "def search_api():\n query = request.args.get(\"url\", \"\", type=str)\n return_html = str_to_bool(request.args.get(\"result\", \"false\", type=str))\n show_stats = str_to_bool(request.args.get(\"stats\", \"false\", type=str))\n info = str_to_bool(request.args.get(\"info\", \"true\", type=str))\n check_all = str_to_bool(request.args.get(\"checkall\", \"false\", type=str))\n favicon = str_to_bool(request.args.get(\"favicon\", \"false\", type=str))\n return_opml = str_to_bool(request.args.get(\"opml\", \"false\", type=str))\n force_crawl = str_to_bool(request.args.get(\"force\", \"false\", type=str))\n check_feedly = str_to_bool(request.args.get(\"feedly\", \"true\", type=str))\n skip_crawl = str_to_bool(request.args.get(\"skip_crawl\", \"false\", type=str))\n\n g.return_html = return_html\n\n url: URL = validate_query(query)\n\n start_time = time.perf_counter()\n\n search_runner = SearchRunner(\n db_client=db_client,\n check_feedly=check_feedly,\n force_crawl=force_crawl,\n check_all=check_all,\n skip_crawl=skip_crawl,\n )\n feed_list: List[CustomFeedInfo] = search_runner.run_search(url)\n stats = search_runner.crawl_stats\n\n search_time = int((time.perf_counter() - start_time) * 1000)\n stats[\"search_time\"] = search_time\n app.logger.info(\"Ran search of %s in %dms\", url, search_time)\n\n if not feed_list and no_response_from_crawl(stats):\n raise NotFoundError(f\"No Response from URL: {url}\")\n\n result: Dict = {}\n if feed_list:\n try:\n kwargs = {}\n if not info:\n kwargs[\"only\"] = [\"url\"]\n if not favicon:\n kwargs[\"exclude\"] = [\"favicon_data_uri\"]\n\n feed_schema = ExternalFeedInfoSchema(many=True, **kwargs)\n\n feed_list = sorted(feed_list, key=lambda x: x.score, reverse=True)\n dump_start = time.perf_counter()\n result = feed_schema.dump(feed_list)\n dump_duration = int((time.perf_counter() - dump_start) * 1000)\n app.logger.debug(\n \"Schema dump: feeds=%d 
duration=%dms\", len(result), dump_duration\n )\n stats[\"dump_time\"] = dump_duration\n except ValidationError as err:\n app.logger.warning(\"Dump errors: %s\", err.messages)\n abort(500)\n\n if show_stats:\n result = {\"feeds\": result, \"search_time_ms\": search_time, \"crawl_stats\": stats}\n\n if return_html:\n return render_template(\n \"results.html\",\n feeds=feed_list,\n json=get_pretty_print(result),\n url=url,\n stats=get_pretty_print(stats),\n )\n elif return_opml:\n opml_result = output_opml(feed_list).decode(\"utf-8\")\n return Response(opml_result, mimetype=\"text/xml\")\n\n return jsonify(result)", "def query(self, n_jobs=1) -> str:\n\n def get_one_answer(file):\n try:\n ans = ask_endpoint(file, os.path.join(self.url, \"predict\"))\n except KeyboardInterrupt:\n raise KeyboardInterrupt\n except:\n ans = {}\n return json.dumps(ans)\n\n # send each file to the endpoint\n # note that in case of many files parallel queries might speed up the processing drastically\n answers = Parallel(n_jobs=n_jobs)(delayed(get_one_answer)(file) for file in tqdm(self.filelist))\n\n # put all answers to the dataframe\n answers = pd.DataFrame(answers, columns=[\"answer\"])\n answers[\"answer\"] = answers[\"answer\"].apply(lambda x: json.loads(x))\n answers[\"path\"] = self.filelist\n\n # create report folder\n os.makedirs(self._report_path, exist_ok=False)\n # save raw answers\n answers.to_csv(os.path.join(self._report_path, \"raw_answers.csv\"), index=False)\n\n # parse answers\n parsed_answers = pd.DataFrame(columns=[\"path\",\n \"row_number\",\n \"prediction\"])\n for _, row in answers.iterrows():\n for _num, _pred in enumerate(row[\"answer\"][\"predictions\"]):\n parsed_answers.loc[len(parsed_answers)] = [row[\"path\"], int(_num), _pred]\n # save parsed answers\n parsed_answers = parsed_answers.sort_values(by=[\"path\", \"row_number\"]).reset_index(drop=True)\n parsed_answers.to_csv(os.path.join(self._report_path, \"parsed_answers.csv\"), index=False)\n\n return self._report_path", "def find_and_download_files(context):\n\n\n input_path = 'input/'\n if os.path.isdir(input_path):\n log.debug('Path already exists: ' + input_path)\n else:\n log.debug('Creating: ' + input_path)\n os.mkdir(input_path)\n\n fw = context.client\n\n if 'classification_measurement' in context.config:\n class_meas = context.config['classification_measurement'].split()\n else:\n class_meas = ['T1']\n\n # session and acquisition include/exclude lists can come from:\n # project info metadata,\n # subject info metadata, and\n # config options\n # The last one wins (how about getting it from an input file also, eh?)\n ses_exclude_list = None\n ses_include_list = None\n acq_exclude_list = None\n acq_include_list = None\n\n fs = 'freesurfer_longitudinal_'\n where = 'Found in project info'\n # check for exclude/include lists of regexs for sessions in project info\n sel = context.gear_dict['project'].info.get(fs + 'session_excludelist')\n if sel:\n ses_exclude_list = sel.split()\n log.info(where+' '+fs+'session_excludelist: \"'+sel+'\"')\n sil = context.gear_dict['project'].info.get(fs + 'session_includelist')\n if sil:\n ses_include_list = sil.split()\n log.info(where+' '+fs+'session_includelist: \"'+sil+'\"')\n # check for exclude/include lists of regexs for acquisitions in project info\n ael = context.gear_dict['project'].info.get(fs + 'acquisition_excludelist')\n if ael:\n acq_exclude_list = ael.split()\n log.info(where+' '+fs+'acquisition_excludelist: \"'+ael+'\"')\n ail = context.gear_dict['project'].info.get(fs + 
'acquisition_includelist')\n if ail:\n acq_include_list = ail.split()\n log.info(where+' '+fs+'acquisition_includelist: \"'+ail+'\"')\n\n where = 'Found in subject info'\n # check for exclude/include lists of regexs for sessions in subject info\n sel = context.gear_dict['subject'].info.get(fs + 'session_excludelist')\n if sel:\n ses_exclude_list = sel.split()\n log.info(where+' '+fs+'session_excludelist: \"'+sel+'\"')\n sil = context.gear_dict['subject'].info.get(fs + 'session_includelist')\n if sil:\n ses_include_list = sil.split()\n log.info(where+' '+fs+'session_includelist: \"'+sil+'\"')\n # check for exclude/include lists of regexs for acquisitions in subject info\n ael = context.gear_dict['subject'].info.get(fs + 'acquisition_excludelist')\n if ael:\n acq_exclude_list = ael.split()\n log.info(where+' '+fs+'acquisition_excludelist: \"'+ael+'\"')\n ail = context.gear_dict['subject'].info.get(fs + 'acquisition_includelist')\n if ail:\n acq_include_list = ail.split()\n log.info(where+' '+fs+'acquisition_includelist: \"'+ail+'\"')\n\n where = 'Found in config'\n # set up exclude/include lists of reegexs for sessions in config\n if 'session_excludelist' in context.config:\n ses_exclude_list = context.config['session_excludelist'].split()\n log.info(where+' session_excludelist: \"'+str(ses_exclude_list)+'\"')\n if 'session_includelist' in context.config:\n ses_include_list = context.config['session_includelist'].split()\n log.info(where+' session_includelist: \"'+str(ses_include_list)+'\"')\n\n # set up exclude/include lists of reegexs for acquisitions in config\n if 'acquisition_excludelist' in context.config:\n acq_exclude_list = context.config['acquisition_excludelist'].split()\n log.info(where+' acquisition_excludelist: \"'+str(acq_exclude_list)+'\"')\n if 'acquisition_includelist' in context.config:\n acq_include_list = context.config['acquisition_includelist'].split()\n log.info(where+' acquisition_includelist: \"'+str(acq_include_list)+'\"')\n\n # go through all sessions, acquisitions to find files\n for session in context.gear_dict['subject'].sessions():\n\n lemme_out = False\n if ses_exclude_list:\n for regex in ses_exclude_list:\n if re.search(regex, session.label): # if excluded, skip\n log.info('Session \"' + session.label + '\" matches ' + \\\n 'exclusion regex, skipping it')\n lemme_out = True\n continue\n if lemme_out:\n continue\n\n if ses_include_list:\n match = False\n for regex in ses_include_list:\n if not re.search(regex, session.label):\n match = True\n if match:\n continue # if not included (matches any regex), skip\n else:\n log.info('Session \"' + session.label + '\" matches ' \\\n 'an inclusion regex, keeping it')\n\n for acquisition in fw.get_session_acquisitions(session.id):\n\n lemme_out = False\n if acq_exclude_list:\n for regex in acq_exclude_list:\n if re.search(regex, acquisition.label): # if excluded, skip\n log.info('Acquisition \"' + acquisition.label + \\\n '\" matches exclusion regex, skipping it')\n lemme_out = True\n continue\n if lemme_out:\n continue\n\n if acq_include_list:\n match = False\n for regex in acq_include_list:\n if not re.search(regex, acquisition.label):\n match = True\n if match:\n continue # if not included (matches any regex), skip\n else:\n log.info('Acquisition \"' + acquisition.label + '\" ' + \\\n 'matches an inclusion regex, keeping it')\n\n for afile in acquisition.files:\n\n # Scan must be nifti\n if afile.type == 'nifti':\n\n found_one = False\n for cm in class_meas:\n if 'Measurement' in afile.classification:\n if cm in 
afile.classification['Measurement']:\n found_one = True\n log.info('Found ' + cm + ' file')\n\n if found_one:\n download_it(fw, acquisition, afile.name, input_path)\n context.gear_dict['visits'].append(\n make_file_name_safe(session.label, '_'))\n else:\n log.info('Ignoring ' + afile.name)", "def query_into_file(self, query, fname=\"\", fields=None, parameters=None):\n target_url = self.build_query(query, fields=fields, parameters=parameters)\n\n with urllib.request.urlopen(target_url) as url:\n content = url.read()\n\n with open(fname, 'wb') as ofs:\n ofs.write(content)", "def api_request(update, oformat, stream, params, yr, mntlist, tstep, back):\n # open connection to era5 files db \n conn = db_connect(cfg)\n # create empty list to store cdsapi requests\n rqlist = []\n # list of faster ips to alternate\n ips = cfg['altips']\n i = 0 \n # assign year and list of months\n if type(yr) is list:\n yrs = yr\n else:\n yrs = [yr]\n\n if mntlist == []: \n mntlist = [\"%.2d\" % i for i in range(1,13)]\n # retrieve stream arguments\n dsargs = define_args(stream, tstep)\n era5log.debug(f'Stream attributes: {dsargs}')\n # get variables details from json file\n vardict = read_vars()\n # define params to download\n if update and params == []:\n params = dsargs['params']\n \n # according to ECMWF, best to loop through years and months and do either multiple\n # variables in one request, or at least loop through variables in the innermost loop.\n \n for y in yrs:\n # build Copernicus requests for each month and submit it using cdsapi modified module\n for mn in mntlist:\n # for each output file build request and append to list\n # loop through params and months requested\n for varp in params:\n queue, var, cdsname = define_var(vardict, varp, era5log)\n # if grib code exists but cds name is not defined skip var and print warning\n if not queue:\n continue\n # create list of filenames already existing for this var and yr\n nclist = []\n sql = \"select filename from file where location=?\" \n tup = (f\"{stream}/{var}/{y}\",)\n if tstep == 'mon':\n tup = (f\"{stream}/{var}/monthly\",)\n nclist += query(conn, sql, tup)\n era5log.debug(nclist)\n\n stagedir, destdir, fname, daylist = target(stream, var, y, mn, dsargs, tstep, back)\n # if file already exists in datadir then skip\n if file_exists(fname, nclist):\n era5log.info(f'Skipping {fname} already exists')\n continue\n rdict = build_dict(dsargs, y, mn, cdsname, daylist, oformat, tstep, back)\n rqlist.append((dsargs['dsid'], rdict, os.path.join(stagedir,fname),\n os.path.join(destdir, fname), ips[i % len(ips)])) \n # progress index to alternate between ips\n i+=1\n era5log.info(f'Added request for {fname}')\n if back:\n break\n \n era5log.debug(f'{rqlist}')\n\n # parallel downloads\n if len(rqlist) > 0:\n # set num of threads = number of params, or use default from config\n if len(params) > 1:\n nthreads = len(params)\n else:\n nthreads = cfg['nthreads']\n pool = ThreadPool(nthreads)\n results = pool.imap(do_request, rqlist)\n pool.close()\n pool.join()\n else:\n era5log.info('No files to download!')\n era5log.info('--- Done ---')", "def set_query_output(self, path):\n\n file = f'sql_query_R{str(self.time_span).replace(\".\", \"_\")} ({str(self.date_time).replace(\":\",\"_\")}).csv'\n self.query_output_file = path_inc(path, file)", "async def exec_write(self, query, *args):", "def execute(self):\n\n _logger.info('Setting up database connection and google doc access...')\n self._connect_to_rdr_replica()\n service_key_info = 
gcp_get_iam_service_key_info(self.gcp_env.service_key_id)\n gs_creds = gspread.service_account(service_key_info['key_path'])\n gs_file = gs_creds.open_by_key(self.doc_id)\n\n # These origin strings will be converted to lowercase when used as query filter values\n for origin in ['Vibrent', 'CareEvolution']:\n self._set_origin_value(origin)\n _logger.info(f'Retrieving consent validation records for {self.origin_value}.....')\n # consent_df will contain all the outstanding NEEDS_CORRECTING issues that still need resolution\n # start_date/end_date refer to the consent authored date range; the validation end date (when the\n # consent_file records were created) is up to a day later than the consent authored end date\n self.consent_df = self._get_consent_validation_dataframe(\n self.report_sql.format_map(SafeDict(start_date=self.start_date.strftime(\"%Y-%m-%d\"),\n end_date=self.end_date.strftime(\"%Y-%m-%d\"),\n validation_end_date=self.validation_end_date.strftime(\"%Y-%m-%d\"),\n report_date=self.report_date.strftime(\"%Y-%m-%d\"),\n origin_filter=self.origin_value.lower())))\n # Workaround: filtering out results for older consents where programmatic PDF validation flagged files\n # where it couldn't find signature/signing date, even though the files looked okay on visual inspection\n self.consent_df = self.remove_potential_false_positives_for_missing_signature(self.consent_df)\n\n # Get all the resolved/OBSOLETE issues for generating resolution stats\n self.resolved_df = self.get_resolved_consent_issues_dataframe()\n _logger.info('Generating report data...')\n self.create_weekly_report(gs_file)\n\n _logger.info('Report complete')\n self._clear_report()", "def query(self):\r\n records = self.input()\r\n if self.to_investigate:\r\n records = self.investigate(records)\r\n post.log.info(\"Caching {} records for {}\".format(len(records), self.name))\r\n self.cache_records(records)", "def write_results_to_disk(self, result_path,results):\n with open(result_path+\"/results.txt\",\"w+\") as out:\n\n for query_num in results:\n for doc_num in results[query_num]:\n out.write(str(query_num)+\" 0 \"+doc_num+\" 1 42.38 mt\\n\")\n out.close()", "def _execute(self):\n # Collect the results.\n results, _ = asyncio.run(\n apd.async_retrieve(\n self.args['pages'],\n self.args['from_'],\n self.args['to'],\n self.args['attempts'],\n self.args['backoff'],\n self.args['dump'],\n ))\n result_count = len(results)\n logger.info(f'Total: {result_count}')\n\n # Get the format and print the results.\n format_ = self.args['format_'].lower()\n formatter = Formatter(format_)\n formatter.print(results)", "def rest_api(self):\n self.__db_init('rest')\n api = self.__api_init()\n self.c.execute(\"SELECT MAX(id) FROM tweets\")\n db_max_id = self.c.fetchone()[0] \n try: \n most_recent = api.search(q=self.keyword, result_type='recent')[0].id\n except tweepy.TweepError as e:\n print(str(e.message[0]['message']) + \n ' Update api.ini with your proper credentials:')\n print(os.path.abspath(_path_finder('userconfig','api.ini')))\n sys.exit(-1)\n flag = 0\n while ( flag == 0 ):\n try:\n batch = 5000\n flag = batch\n for search_res in tweepy.Cursor(api.search, q=self.keyword,\n count=100, result_type=\"recent\", \n since_id=db_max_id, \n max_id=most_recent).items(batch):\n flag -= 1\n print(search_res.id, search_res.created_at)\n self.c.execute('''INSERT OR IGNORE INTO tweets (id, date) \n VALUES (?, ?)''', \n (search_res.id, search_res.created_at))\n except tweepy.TweepError as e:\n print('I caught an error:', e.message)\n flag = 0\n 
finally:\n self.c.execute(\"SELECT last_insert_rowid() from tweets\")\n rid = self.c.fetchone()[0]\n if rid:\n self.c.execute('''SELECT id FROM tweets WHERE\n rowid={0}'''.format(rid))\n rid = self.c.fetchone()[0]\n most_recent = rid - 1\n data = api.rate_limit_status()\n print(data['resources']['search'])\n self.conn.commit()\n self.conn.close()\n print('REST database file has been created/updated:') \n print(os.path.abspath(_path_finder(\n 'keydata','{0}_rest.db'.format(self.keyword))))", "def store_results(column, datafiles, server, username, password, storage, groups, resultid, investigations, date):\n o = 0\n for name in datafiles[column]:\n cont = subprocess.Popen([\"curl -s -k \" + server + datafiles[column-1][o]], stdout=subprocess.PIPE, shell=True).communicate()[0]\n old_name = strftime(\"%d_%b_%Y_%H:%M:%S\", gmtime()) + \"_\" + name.replace('/', '_').replace(' ', '_')\n with open(username + \"/\" + old_name, \"w\") as outputfile:\n outputfile.write(cont)\n new_name = sha1sum(username + \"/\" + old_name) + \"_\" + old_name\n os.rename(username + \"/\" + old_name, username + \"/\" + new_name)\n for i in investigations:\n for g in groups:\n call([\"curl -s -k -u \" + username + \":\" + password + \" -X MKCOL \" + storage + \"/\" + i.replace('\"', '') + \"/\" +\n g.replace('\"', '') + \"/results_\" + str(resultid)], shell=True)\n call([\"curl -s -k -u \" + username + \":\" + password + \" -T \" + '\\'' + username + \"/\" + new_name + '\\'' + \" \" +\n storage + \"/\" + i.replace('\"', '') + \"/\" + g.replace('\"', '') + \"/results_\" + str(resultid) + \"/\" + new_name], shell=True)\n call([\"curl http://127.0.0.1:3030/ds/update -X POST --data 'update=INSERT DATA { GRAPH <http://127.0.0.1:3030/ds/data/\" +\n username.replace('@', '') + \"> { <http://127.0.0.1:3030/\" + str(resultid) + \"> <http://127.0.0.1:3030/ds/data?graph=\" +\n username.replace('@', '') + \"#pid> \\\"\" + storage + \"/\" + i.replace('\"', '') + \"/\" + g.replace('\"', '') +\n \"/results_\" + str(resultid) + \"/\" + new_name + \"\\\" } }' -H 'Accept: text/plain,*/*;q=0.9'\"], shell=True)\n call([\"curl http://127.0.0.1:3030/ds/update -X POST --data 'update=INSERT DATA { GRAPH <http://127.0.0.1:3030/ds/data/\" +\n username.replace('@', '') + \"> { <http://127.0.0.1:3030/\" + str(resultid) + \"> <http://127.0.0.1:3030/ds/data?graph=\" +\n username.replace('@', '') + \"#results_id> \\\"\" + str(resultid) + \"\\\" } }' -H 'Accept: text/plain,*/*;q=0.9'\"], shell=True)\n call([\"curl http://127.0.0.1:3030/ds/update -X POST --data 'update=INSERT DATA { GRAPH <http://127.0.0.1:3030/ds/data/\" +\n username.replace('@', '') + \"> { <http://127.0.0.1:3030/\" + str(resultid) + \"> <http://127.0.0.1:3030/ds/data?graph=\" +\n username.replace('@', '') + \"#group_id> \\\"\" + g.replace('\"', '') + \"\\\" } }' -H 'Accept: text/plain,*/*;q=0.9'\"], shell=True)\n call([\"curl http://127.0.0.1:3030/ds/update -X POST --data 'update=INSERT DATA { GRAPH <http://127.0.0.1:3030/ds/data/\" +\n username.replace('@', '') + \"> { <http://127.0.0.1:3030/\" + str(resultid) + \"> <http://127.0.0.1:3030/ds/data?graph=\" +\n username.replace('@', '') + \"#investigation_id> \\\"\" + i.replace('\"', '') + \"\\\" } }' -H 'Accept: text/plain,*/*;q=0.9'\"], shell=True)\n call([\"curl http://127.0.0.1:3030/ds/update -X POST --data 'update=INSERT DATA { GRAPH <http://127.0.0.1:3030/ds/data/\" +\n username.replace('@', '') + \"> { <http://127.0.0.1:3030/\" + str(resultid) + \"> <http://127.0.0.1:3030/ds/data?graph=\" +\n username.replace('@', '') + 
\"#date> \\\"\" + date + \"\\\" } }' -H 'Accept: text/plain,*/*;q=0.9'\"], shell=True)\n call([\"rm\", username + \"/\" + new_name])\n call([\"rm\", username + \"/\" + old_name])\n o += 1", "def read_results(self):\n for system, filenames in SmokeTests.INPUT_FILES.items():\n input_file = filenames[\"results\"]\n with open(input_file) as fin:\n self._results[system] = fin.read().strip() == \"0\"", "def test_get_results_verbose(self):\n\t\tpass", "def main():\n #Set up objects\n client = BigQueryClient()\n writer = FileWriter()\n\n #Send data from big query to a given file.\n # 500 is the limit of data points fetched.\n client.produce_json_data(writer, 500)", "def process_query(query_file):\r\n query_data = query_file.readlines()\r\n query_dict = {}\r\n x = 1 \r\n search_dict = {}\r\n search_dict['username'] = query_data[x].strip('\\n')\r\n x += 1\r\n operation_list = []\r\n \r\n while query_data[x] != 'FILTER\\n': \r\n operation_list.append(query_data[x].strip('\\n'))\r\n x += 1\r\n \r\n search_dict['operations'] = operation_list \r\n query_dict['search'] = search_dict \r\n x += 1\r\n \r\n filter_dict = {}\r\n filter_format(filter_dict, query_data, 'name-includes', x)\r\n filter_format(filter_dict, query_data, 'location-includes', x)\r\n filter_format(filter_dict, query_data, 'follower', x)\r\n filter_format(filter_dict, query_data, 'following', x)\r\n query_dict['filter'] = filter_dict\r\n \r\n present_dict = {}\r\n sort_by = query_data[-2].strip('sort-by ')\r\n present_dict['sort-by'] = sort_by.strip('\\n')\r\n \r\n format_type = query_data[-1].lstrip('format ')\r\n present_dict['format'] = format_type\r\n query_dict['present'] = present_dict\r\n \r\n return query_dict", "def request(query):", "def execute_query():\n start_time = time.time()\n\n queries = request.json[\"queries\"]\n random_command = request.json[\"random_command\"]\n\n \"\"\" Running the queries against the pre-loaded index. \"\"\"\n output_dict = runner.run_queries(queries, random_command)\n\n \"\"\" Dumping the results to a JSON file. 
\"\"\"\n with open(output_location, 'w') as fp:\n json.dump(output_dict, fp)\n\n response = {\n \"Response\": output_dict,\n \"time_taken\": str(time.time() - start_time),\n \"username_hash\": username_hash\n }\n return flask.jsonify(response)", "def query_api(term, location, RADIUS_SIZE, RESTRICTED):\n response = search(API_KEY, term, location, 0, RADIUS_SIZE)\n businesses = response.get('businesses')\n\n if not businesses:\n print(u'No businesses for {0} in {1} found.'.format(term, location))\n return\n numFound = 0\n while len(businesses) >= 50 + numFound:\n numFound += 50\n response = search(API_KEY, term, location, numFound, RADIUS_SIZE)\n more_businesses = response.get('businesses')\n if more_businesses is not None:\n businesses.extend(more_businesses)\n\n names = []\n contacts = []\n addresses = []\n urls = []\n categories = []\n city = []\n state = []\n zipcode = []\n radius = []\n #Create a list from the names\n #Cross reference with restricted and delete elements that are matching\n for i in range(0, len(businesses)):\n not_matched = True\n for j in range (0, len(RESTRICTED)):\n if(businesses[i]['name'] == RESTRICTED[j].strip('\\n')):\n not_matched = False\n if(not_matched and (businesses[i]['distance']) < RADIUS_SIZE):\n names.append(businesses[i]['name'])\n radius.append(businesses[i]['distance'] / 1600)\n contacts.append(businesses[i]['display_phone'])\n addresses.append(businesses[i]['location']['address1'])\n city.append(businesses[i]['location']['city'])\n state.append(businesses[i]['location']['state'])\n zipcode.append(businesses[i]['location']['zip_code'])\n categories.append(businesses[i]['categories'][0]['title'])\n urls.append(businesses[i]['url'])\n list_restaurants = open('target_restaurants.txt', 'w')\n for x in range(0, len(names)):\n try:\n list_restaurants.write(\"%s\\t\" % names[x])\n list_restaurants.write(\"%s\\t\" % contacts[x])\n list_restaurants.write(\"%s\\t\" % radius[x])\n list_restaurants.write(\"%s\\t\" % addresses[x])\n list_restaurants.write(\"%s\\t\" % city[x])\n list_restaurants.write(\"%s\\t\" % state[x])\n list_restaurants.write(\"%s\\t\" % zipcode[x])\n list_restaurants.write(\"%s\\t\" % categories[x])\n list_restaurants.write(\"%s\\n\" % urls[x])\n except UnicodeEncodeError:\n continue\n\n print(\"Businesses found and printed to target_restaurants.txt file\")", "def main():\n parser = OptionParser(usage=\"%prog <sourcefile> [-s site] [-q] [-t] [-f outfile]\", \n version=\"SecPoint.com %prog \"+VERSION,\n epilog=\"SecPoint.com Google Penetration Testing Hack Database v. 
\"+VERSION)\n parser.add_option(\"-o\", \"--output\", dest=\"filename\",\n help=\"save output to file\", metavar=\"FILE\")\n parser.add_option(\"-s\", \"--site\", dest=\"sitename\",\n help=\"generate queries for the SITE\", metavar=\"SITE\")\n parser.add_option(\"-m\", \"--multiple\", dest=\"listfilename\",\n help=\"generate queries for multiple sites listed in LISTFILE\", metavar=\"LISTFILE\")\n parser.add_option(\"-q\", \"--query\",\n action=\"store_true\", dest=\"gen_query\", default=False,\n help=\"generate google query urls for each line\")\n parser.add_option(\"-t\", \"--html\",\n action=\"store_true\", dest=\"gen_html\", default=False,\n help=\"generate output in HTML format (implies -q)\")\n (options, args) = parser.parse_args()\n if len(args) != 1:\n print \"\"\"SecPoint.com Google Penetration Testing Hack Database\n\n The Portable Penetrator - Wifi Recovery - Vulnerability Scanner\n http://www.secpoint.com/portable-penetrator.html\n \"\"\"\n parser.print_help()\n print SAMPLES\n exit()\n #parser.error(\"please set source file (could be found in 'db' dir)\")\n #all options \n site_name = options.sitename\n gen_html = options.gen_html\n gen_query = options.gen_query\n out_file = options.filename\n multlist_file = options.listfilename\n db_dir = os.path.join(os.path.dirname(__file__),'db')\n source_file = os.path.join(db_dir,args[0])\n if not os.path.isfile(source_file):\n parser.error(\"could not find source file! Please check if it exists in 'db' dir\")\n\n #starting!\n strs = get_strings(source_file)\n if not strs:\n print \"Can't get data from your source file!\"\n exit()\n queries = []\n if site_name and multlist_file:\n print \"Please use -s OR -m switches alone!\"\n exit() \n if site_name:\n strs = append_sitename(strs,site_name)\n if multlist_file:\n if not os.path.isfile(multlist_file):\n print \"Could not find file from -m switch!\"\n exit()\n mlst = open(multlist_file).read().split('\\n')\n strsnew = [] #using multiple sites to create queries\n for i in mlst:\n strsnew.extend(append_sitename(strs,i))\n strs = strsnew \n if gen_query:\n [strs,queries] = gen_google_query(strs)\n if gen_html:\n if not gen_query: #if not previuosly generated\n [strs,queries] = gen_google_query(strs)\n strs = gen_html_output(strs,queries)\n else:\n if queries: \n strs = queries\n\n save_output(strs,out_file)", "def fetch_data(args):\n logger.debug(\"Running the fetch_data function\")\n\n #Loading the config\n with open(os.path.join(\"Config\",\"config.yml\"), \"r\") as f:\n config = yaml.safe_load(f)\n\n #Starting the scraping process\n tstart = datetime.datetime.now()\n err_count = 0\n\n logger.info(\"Starting web scraping now.\")\n for i in range(config[\"fetch_data\"][\"indices\"][\"start\"], config[\"fetch_data\"][\"indices\"][\"end\"]+1):\n try:\n time.sleep(1)\n req_link1 = \"http://www.gutenberg.org/cache/epub/\" + str(i) + \"/pg\" + str(i) + \".txt\"\n response1 = requests.get(req_link1)\n \n req_link2 = \"http://www.gutenberg.org/files/\" + str(i) + \"/\" + str(i) + \"-0.txt\"\n response2 = requests.get(req_link2)\n \n response1.encoding = \"UTF-8\"\n response2.encoding = \"UTF-8\"\n \n if response1.status_code == 200:\n with open(config[\"fetch_data\"][\"save_location\"] + str(i) + \".txt\", \"w\", encoding=\"UTF-8\") as text_file:\n text_file.write(response1.text)\n \n elif response2.status_code == 200:\n with open(config[\"fetch_data\"][\"save_location\"] + str(i) + \".txt\", \"w\", encoding=\"UTF-8\") as text_file:\n text_file.write(response2.text)\n \n else:\n err_count = 
err_count + 1 \n logger.error(\"Status Code {} returned for index {}\".format(response.status_code, i))\n \n if i % 500 == 0:\n time.sleep(30)\n logger.info(\"At Index {}. Time Elapsed: {}\".format(i, datetime.datetime.now()-tstart)) \n\n except Exception as e:\n logger.error(e)\n \n logger.info(\"Total Errorred documents: {}\".format(err_count))\n logger.info(\"Total Successful documents: {}\".format(config[\"fetch_data\"][\"indices\"][\"end\"] - config[\"fetch_data\"][\"indices\"][\"start\"] + 1 -err_count))\n logger.info(\"Total Time taken: {}\".format(datetime.datetime.now()-tstart))\n\n return", "async def wolfram(self, ctx, *, query: str):\n with open(self.json_file) as f:\n api_key = json.load(f)['api_key']\n\n url = 'http://api.wolframalpha.com/v2/query'\n want_image = query.split(' ')[0] == 'image'\n if not want_image:\n params = {'appid': api_key, 'input': query, 'format': 'plaintext'}\n async with ctx.typing():\n async with self.session.get(url=url, params=params) as response:\n if response.status == 200:\n html = await response.text()\n soup = BeautifulSoup(html, 'html.parser')\n success = soup.find('queryresult')['success']\n if success == 'true':\n query_input = soup.find('plaintext').contents\n full_response = '<http://www.wolframalpha.com/input/?i={}>'.format(parse.quote_plus(query))\n message = '**Full Response:** {} \\n'.format(full_response)\n message += '**Input:** {} \\n'.format(query_input[0])\n message += '**Result:** \\n' \\\n '```\\n'\n for elem in soup.find_all('plaintext')[1:6]:\n if len(elem) > 0:\n message += elem.contents[0] + '\\n'\n message += '```'\n\n await ctx.send(message)\n else:\n await ctx.send('Query was unsuccessful please try something else')\n else:\n re_query = query.split(' ')[1:]\n re_query = ' '.join(re_query)\n params = {'appid': api_key, 'input': re_query, 'format': 'plaintext,image'}\n async with ctx.typing():\n async with self.session.get(url=url, params=params) as response:\n if response.status == 200:\n soup = BeautifulSoup(await response.text(), 'html.parser')\n success = soup.find('queryresult')['success']\n if success == 'true':\n query_input = soup.find('plaintext').contents\n full_response = '<http://www.wolframalpha.com/input/?i={}>'.format(parse.quote_plus(re_query))\n message = '**Full Response:** {} \\n'.format(full_response)\n message += '**Input:** {} \\n'.format(query_input[0])\n message += '**Result:** \\n'\n await ctx.send(message)\n for elem in soup.find_all('img')[1:5]:\n await ctx.send(elem['src'])\n else:\n await ctx.send('Query was unsuccessful please try something else')", "def query_DB_satellites(outputpath=\"../data/\", user=\"anonimo\", passwd=\"secreto\"):\n #define the output file\n outputfile=outputpath+\"milky_way_satellites.csv\"\n # Build the SQL query\n \n query = \"with milky_way_halos as (select * from Bolshoi..BDMW where snapnum=416 and Mvir > 5.0E11 and Mvir < 6.0E11 ) select sub.* from milky_way_halos mwh, Bolshoi..BDMW sub where sub.snapnum = 416 and sub.hostFlag = mwh.bdmId\"\n\n # Build the wget command to query the database\n website = \"http://wget.multidark.org/MyDB?action=doQuery&SQL=\"\n username = user\n password = passwd\n \n wget_options=\" --content-disposition --cookies=on --keep-session-cookies --save-cookies=cookie.txt --load-cookies=cookie.txt --auth-no-challenge\" \n wget_options=wget_options+\" -O \"+outputfile +\" \"\n wget_command=\"wget --http-user=\"+username+\" --http-passwd=\"+password+\" \"+wget_options \n command=wget_command + \"\\\"\"+ website + query+\"\\\"\"\n print \"\"\n 
print query\n print \"\"\n print command\n print \"\"\n # execute wget in shell\n retcode = call(command,shell=True)", "def GetPyclassyfireResults(QueryIDDict):\r\n ResultsFolder = \"/mnt/scratch/hoeks102/Thesis_Bsc/mibig_classyfire_results/\"\r\n for key, QueryID in QueryIDDict.items():\r\n try:\r\n json = pyclassyfire.client.get_results(QueryID, 'json')\r\n FixedCompoundID = key.replace(' ','_')\r\n with open(ResultsFolder+FixedCompoundID+'.json','w') as w:\r\n w.write(json)\r\n except Exception as e:\r\n print(e)\r\n print(key)\r\n print(QueryID)\r\n return None", "def start_queryResult_generator(inFile, fDic, work_sheet):\n \"\"\" http://biopython.org/DIST/docs/api/Bio.SearchIO.BlastIO-module.html\"\"\"\n qGenerator = SearchIO.parse(inFile, 'blast-xml')\n max_hits = 0\n query_count = 1\n # Step through all the records in the lump xml data file and write out\n # each separate hit to file. Also write the summary information to the\n # work sheet.\n for query_result in qGenerator:\n print('Processing Query BLAST return ' + str(query_count))\n number_hits = int(len(query_result.hits))\n # Extend header out right if new MAXHITS\n if number_hits > max_hits:\n max_hits = number_hits \n if number_hits == 0:\n # Construct path plus file name for no hit query\n filename = str(fDic['topDir'] + fDic['noHit'] + 'Query_' \n + str(query_count) + '_H_none.xml')\n # Write out any Queries that had to hits to a no Hit subfolder\n SearchIO.write(query_result, filename, 'blast-xml')\n write_qr_to_ws(query_count, query_result, work_sheet)\n else :\n # Now set up a counter of 'hits' in the QueryResult so hit's\n # can be sliced away into their own record cleanly.\n hit_count = 0;\n for hit in query_result.hits:\n total_hsps = len (hit.hsps)\n lowest_eval = hit.hsps[0].evalue\n best_hsp = hit.hsps[0]\n for hsp in hit.hsps:\n if hsp.evalue < lowest_eval:\n lowest_eval = hsp.evalue\n best_hsp = hsp\n filename = str(fDic['topDir'] + outputFileName(query_count, hit, best_hsp))\n SearchIO.write(query_result[hit_count:(hit_count + 1)], filename , 'blast-xml')\n hit_count += 1\n # Write out query_result to worksheet \n write_qr_to_ws(query_count, query_result, work_sheet)\n query_count += 1\n # break is debugging code\n # if query_count == 20:\n # break\n build_ws_header(work_sheet, max_hits)\n return qGenerator", "def query(self):\n pass", "def search_file(self, file_name):\n try:\n total_ops = self.key_end - self.key_start\n run_ops = total_ops/10\n print \"Staring Benchmark Searching file in the Server...\"\n t1 = time.time()\n for i in range(run_ops):\n for j in range(1,11,1):\n file_name = \"text-\"+str(j)+\"kb\"\n self.service.get(file_name)\n t2 = time.time()\n print \"%s Search operations = %s sec\" % (total_ops,t2-t1)\n print \"per Search operation = %s sec\" % ((t2-t1)/total_ops)\n print \"per Search operation = %s msec\" % (((t2-t1)/total_ops)*1000)\n except Exception as e:\n print \"Search File Error, %s\" % e", "def _query_jobOutput(self, job_url):\n print('WaPOR API: _query_jobOutput')\n\n request_url = job_url\n\n ijob = 0\n contiue = True\n wait_time = 0\n if self.print_job:\n print(request_url)\n\n while contiue:\n # requests\n try:\n resq = requests.get(\n request_url)\n resq.raise_for_status()\n except requests.exceptions.HTTPError as err:\n raise Exception(\"WaPOR API Http Error: {e}\".format(e=err))\n except requests.exceptions.ConnectionError as err:\n raise Exception(\"WaPOR API Error Connecting: {e}\".format(e=err))\n except requests.exceptions.Timeout as err:\n raise Exception(\"WaPOR API 
Timeout Error: {e}\".format(e=err))\n except requests.exceptions.RequestException as err:\n raise Exception(\"WaPOR API OOps: Something Else {e}\".format(e=err))\n else:\n resq_json = resq.json()\n try:\n resp = resq_json['response']\n # print(resp)\n\n if resq_json['message'] == 'OK':\n jobType = resp['type']\n\n if self.print_job:\n print('WaPOR API: {i} {t}sec {s}'.format(\n i=ijob, t=wait_time, s=resp['status']))\n\n if resp['status'] == 'COMPLETED':\n contiue = False\n print('WaPOR API: {t}sec {s}'.format(\n t=wait_time, s=resp['status']))\n\n if jobType == 'CROP RASTER':\n output = resp['output']['downloadUrl']\n elif jobType == 'AREA STATS':\n results = resp['output']\n output = pd.DataFrame(\n results['items'], columns=results['header'])\n else:\n print('WaPOR API ERROR: Invalid jobType {t}'.format(\n t=jobType))\n return output\n elif resp['status'] == 'COMPLETED WITH ERRORS':\n contiue = False\n print('WaPOR API: {t}sec {s}'.format(\n t=wait_time, s=resp['status']))\n\n print(resp['log'][-1])\n elif resp['status'] == 'WAITING':\n contiue = True\n if wait_time % 60 == 0:\n print('WaPOR API: {t}sec {s}'.format(\n t=wait_time, s=resp['status']))\n\n time.sleep(TIME_SLEEP_SECOND)\n wait_time += TIME_SLEEP_SECOND\n if wait_time > TIME_REQUEST_AFTER_SECOND:\n contiue = False\n print(resp['log'][-1])\n elif resp['status'] == 'RUNNING':\n contiue = True\n if wait_time % 60 == 0:\n print('WaPOR API: {t}sec {s}'.format(\n t=wait_time, s=resp['status']))\n\n time.sleep(TIME_SLEEP_SECOND)\n wait_time += TIME_SLEEP_SECOND\n if wait_time > TIME_REQUEST_AFTER_SECOND:\n contiue = False\n print(resp['log'][-1])\n else:\n raise Exception('WaPOR API ERROR:'\n ' Unkown status'\n ' \"{s}\".'.format(s=resp['status']))\n else:\n print(resq_json['message'])\n except BaseException:\n print('WaPOR API ERROR: Cannot get {url}'.format(url=request_url))\n\n ijob += 1", "def dwn_all_saved_results(request):\n \n sources = []\n for i in Source.objects.filter(user=request.user):\n sources.append((i.source_id, i.datetime_extracted.strftime('%d/%m/%Y %H:%M'), i.source))\n \n data = []\n for s, timee, s_name in sources:\n objs = ExtractedRelation.objects.filter(source=s)\n for i in objs:\n data.append((i.sentence, i.head, i.tail, i.pred_relation, i.sentiment, i.conf, timee, s_name, i.rel_id, os.path.basename(i.ckpt)))\n \n df = pd.DataFrame(data, columns=['Sentence', 'Head', 'Tail', 'Predicted Relation', 'Predicted Sentiment', 'Confidence', 'Extraction Time', 'Source', 'rel_id', 'Checkpoint'])\n df.to_csv(\"temp/all_analysis_results.csv\", index=False)\n \n return FileResponse(open('temp/all_analysis_results.csv','rb'))", "def download_all_ground_truths(request):\n\n json_resp = {}\n json_resp['ground_truth'] = []\n cursor = connection.cursor()\n mode = request.GET.get('gt_mode',None)\n if mode is None:\n human = NameSpace.objects.get(ns_id = 'Human')\n robot = NameSpace.objects.get(ns_id = 'Robot')\n gt_human = GroundTruthLogFile.objects.filter(ns_id = human)\n agent = User.objects.get(ns_id = robot,username = 'Robot_user')\n gt_robot = GroundTruthLogFile.objects.filter(ns_id = robot,username = agent)\n for el in gt_human:\n gt_json = el.gt_json\n if gt_json['gt_type'] == 'concept-mention':\n gt_json['gt_type'] = 'linking'\n json_resp['ground_truth'].append(gt_json)\n for el in gt_robot:\n gt_json = el.gt_json\n if gt_json['gt_type'] == 'concept-mention':\n gt_json['gt_type'] = 'linking'\n json_resp['ground_truth'].append(gt_json)\n cursor.execute(\"SELECT gt_json FROM ground_truth_log_file WHERE ns_id = %s AND 
username != %s\",['Robot','Robot_user'])\n ans = cursor.fetchall()\n for el in ans:\n gt_json = json.loads(el[0])\n if gt_json['gt_type'] == 'concept-mention':\n gt_json['gt_type'] = 'linking'\n json_resp['ground_truth'].append(gt_json)\n\n elif mode.lower() == 'automatic':\n cursor.execute(\n \"SELECT gt_json FROM ground_truth_log_file WHERE ns_id = %s AND username != %s\",\n ['Robot', 'Robot_user'])\n\n #CAMBIO\n # cursor.execute(\n # \"SELECT g.gt_json FROM ground_truth_log_file AS g INNER JOIN ground_truth_log_file AS gg ON g.id_report = gg.id_report AND g.language = gg.language AND g.gt_type = gg.gt_type AND g.id_report = gg.id_report AND g.ns_id = gg.ns_id WHERE g.ns_id = %s AND g.username != %s AND gg.username = %s AND g.insertion_time != gg.insertion_time\",\n # ['Robot', 'Robot_user', 'Robot_user'])\n ans = cursor.fetchall()\n for el in ans:\n gt_json = json.loads(el[0])\n\n if gt_json['gt_type'] == 'concept-mention':\n gt_json['gt_type'] = 'linking'\n json_resp['ground_truth'].append(gt_json)\n\n return JsonResponse(json_resp)", "def test_query_cached(self):\n CreateMatch()\n\n data = {\n \"term1\": \"TESTURL1\",\n \"term2\": \"TESTURL2\"\n }\n response = self.app.post(\n \"/degree\", data=data, follow_redirects=True\n )\n self.assertEqual(response.status_code, 200)\n\n res_txt = response.get_data(as_text=True)\n\n self.assertIn(\"33 degrees\", res_txt)\n self.assertIn(\"YAAAAY!\", res_txt)", "def test_post_results(get_interface_params, protocol_constant, protocol_name):\n from sail_on_client.protocol.localinterface import LocalInterface\n\n config_directory, config_name = get_interface_params\n local_interface = LocalInterface(config_name, config_directory)\n session_id = _initialize_session(local_interface, protocol_name)\n result_files = {\n protocol_constant: os.path.join(\n os.path.dirname(__file__), f\"test_results_{protocol_name}.1.1.1234.csv\"\n )\n }\n local_interface.post_results(\n result_files, f\"{protocol_name}.1.1.1234\", 0, session_id\n )", "def report_results(data):\n global _HAS_REPORTED_RESULTS # pylint:disable=global-statement\n if _HAS_REPORTED_RESULTS:\n raise RuntimeWarning(\"Has already reported evaluation results once.\")\n if IS_ORION_ON:\n with open(RESULTS_FILENAME, 'w') as results_file:\n json.dump(data, results_file)\n else:\n print(data)\n _HAS_REPORTED_RESULTS = True", "def test_successful_file(self):\n\n url = '/%s/jobs/%i/input_files/' % (self.api, self.job.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n results = result['results']\n self.assertEqual(len(results), 2)\n for result in results:\n self.assertTrue(result['id'] in [self.file3.id, self.file4.id])\n self.assertIn('file_name', result)\n self.assertIn('workspace', result)\n self.assertIn('media_type', result)\n self.assertIn('file_type', result)\n self.assertIn('file_size', result)\n self.assertIn('file_path', result)\n self.assertIn('is_deleted', result)\n self.assertIn('url', result)\n self.assertIn('created', result)\n self.assertIn('deleted', result)\n self.assertIn('data_started', result)\n self.assertIn('data_ended', result)\n self.assertIn('source_started', result)\n self.assertIn('source_ended', result)\n self.assertIn('last_modified', result)\n self.assertIn('geometry', result)\n self.assertIn('center_point', result)\n self.assertIn('countries', result)\n self.assertIn('job_type', result)\n self.assertIn('job', result)\n self.assertIn('job_exe', result)\n 
self.assertIn('job_output', result)\n self.assertIn('recipe_type', result)\n self.assertIn('recipe', result)\n self.assertIn('recipe_node', result)\n self.assertIn('batch', result)\n self.assertFalse(result['is_superseded'])\n self.assertIn('superseded', result)", "def request(self, file_name=None):\n self.__log('Starting to build a request.', 'warning')\n if file_name:\n self.__log(f'File name specified. Setting output filename to \"{file_name}\"')\n self.output_filename = file_name\n\n self.validate()\n url = self.create_query_url()\n self.__log('Initiating post request to query URL.')\n req = requests.post(url, data=self.options)\n self.handle_response(req)\n self.cleanup()", "def searchAllTableName(nomeDb):\n\n #value=seqOfAsciiCode(nomeDb)\n #where =informationSchema.condizioneSulDb + \" = CHAR(\"+value+\")\";\n #print (where)\n #where=\"\"+informationSchema.condizioneSulDb+\" = (SELECT %s FROM %s LIMIT %s,1 )\"%(informationSchema.colonnaNomeDb,informationSchema.tabellaDB,1);\n if(OptionConfiguration.methodSentData==\"POST\"):\n value = seqOfAsciiCode(nomeDb)\n where = informationSchema.condizioneSulDb + \" = CHAR(\" + value + \")\";\n #print (where)\n numeroRighe=countValueofTablePost(informationSchema.tabelleContienteNameTabelle,where)\n #print (numeroRighe)\n print(\"\")\n if(numeroRighe!=None):\n print (\"Num of table of %s -> %s\"%(nomeDb,numeroRighe))\n print (\"\")\n valori=searchValueofTablePost(numeroRighe,informationSchema.tabelleContienteNameTabelle,informationSchema.colonnaNomeTabelle,where)\n\n if(valori!=None):\n\n fileWrite = open(\"TableNameOf_%s_.txt\"%(nomeDb), 'w')\n for value in valori:\n fileWrite.write(str(value) + '\\n')\n fileWrite.close()\n print (OptionConfiguration.bcolors.BOLD+\"Valori scritti su TableNameOf_%s_.txt\"%(nomeDb)+OptionConfiguration.bcolors.ENDC)\n\n else:\n print (OptionConfiguration.bcolors.BOLD+OptionConfiguration.bcolors.FAIL+\"Error no value find\"+OptionConfiguration.bcolors.ENDC)\n\n else:\n print (OptionConfiguration.bcolors.BOLD+OptionConfiguration.bcolors.FAIL+\"Error num of rows =0 \"+OptionConfiguration.bcolors.ENDC)\n\n elif(OptionConfiguration.methodSentData==\"GET\"):\n value = seqOfAsciiCode(nomeDb)\n where = informationSchema.condizioneSulDb + \" = CHAR(\" + value + \")\";\n #print(where)\n numeroRighe=countValueofTableGet(informationSchema.tabelleContienteNameTabelle,where)\n# print (numeroRighe)\n print (\"\")\n if(numeroRighe!=None):\n print (\"Num of table of %s -> %s\" % (nomeDb,numeroRighe))\n print (\"\")\n valori=searchValueofTableGet(numeroRighe,informationSchema.tabelleContienteNameTabelle,informationSchema.colonnaNomeTabelle,where)\n if(valori!=None):\n\n fileWrite = open(\"TableNameOf_%s_.txt\" % (nomeDb), 'w')\n for value in valori:\n fileWrite.write(str(value) + '\\n')\n fileWrite.close()\n print (\"Valori scritti su TableNameof_%s_.txt\"%(nomeDb))\n\n else:\n print (OptionConfiguration.bcolors.BOLD + OptionConfiguration.bcolors.FAIL + \"Error no value find\" + OptionConfiguration.bcolors.ENDC)\n\n else:\n print (OptionConfiguration.bcolors.BOLD + OptionConfiguration.bcolors.FAIL + \"Error num of rows =0 \" + OptionConfiguration.bcolors.ENDC)", "def report_results(results: dict):\n # Loop thru our results, compare to our upload and return the verdict\n for result in results:\n for item in Analyzer.files:\n if result[\"sha256\"] == item[2]:\n if \"no specific threat\" in result[\"verdict\"]:\n # File is clean\n logger.info(\"Verdict for %s: %s\", item[1], result[\"verdict\"])\n else:\n # Mitigation would trigger 
from here\n logger.warning(\"Verdict for %s: %s\", item[1], result[\"verdict\"])", "def getResults(server, tasksInfo, maxNumberAnswers, oper = 0, fileName1 = 'data/jsonAnswersInfo.dat', fileName2 = 'data/jsonUsableInfo.dat'):\n #~ usableData = open('usableData.dat', 'w')\n answersApp = []\n usableTasks = []\n numberTasks = len(tasksInfo)\n if oper == 0:\n answerIdx = 0\n #~ for item, number in enumerate(tasksInfo):\n for item in range(numberTasks):\n JSONdata = urllib2.urlopen(url=server+\"/api/taskrun?task_id=\"+ \\\n str(tasksInfo[item]['taskId'])+\"&limit=\"+ \\\n str(maxNumberAnswers)).read()\n data = json.loads(JSONdata)\n lenData = len(data)\n #HARDCODE BEGINS - Testing the obtaining of an exact number of answers\n if (lenData < 0):\n # If there are less answers, we pop the item out!\n #~ trash = tasksInfo.pop(item)\n continue\n else:\n print \"Task \" + str(tasksInfo[item]['taskId']) + \" has \" + str(lenData) + \" answers. NICE! :-)\\n\"\n usableTasks.append(tasksInfo[item])\n #HARDCODE MIDDLE\n #~ usableData.write(str(tasksInfo[item]['taskId'])+\" \"+str(tasksInfo[item]['area'])+\"\\n\")\n answersApp.append([])\n for ans in range(lenData):\n answersApp[answerIdx].append({'taskId':data[ans]['task_id'], \\\n 'id':data[ans]['id'], 'answer':data[ans]['info']['besttile']})\n answerIdx = answerIdx + 1\n #HARDCODE END\n with open(fileName1,'w') as outfile:\n json.dump(answersApp, outfile)\n outfile.close()\n with open(fileName2,'w') as outfile:\n json.dump(usableTasks, outfile)\n outfile.close()\n elif oper == 1:\n with open(fileName1,'r') as outfile:\n answersApp = json.load(outfile)\n outfile.close()\n with open(fileName2,'r') as outfile:\n usableTasks = json.load(outfile)\n outfile.close()\n print 'number of tasks: ', len(tasksInfo)\n print 'number of usable tasks: ', len(usableTasks)\n print 'number of usable answers: ', len(answersApp)\n #~ usableData.close()\n #~ exit(1)\n return (usableTasks, answersApp)", "def scan(infile):\n with open(infile, 'r') as fj:\n args = json.load(fj)\n api_request(args['update'], args['format'], args['stream'], \n args['params'], args['year'], args['months'], \n args['timestep'], args['back'])", "def main():\n\n rapidapi_key = os.getenv(\"RAPIDAPIKEY\")\n geniuslyrics_key = os.getenv(\"GENIUSLYRICSKEY\")\n spotify_client_id = os.getenv(\"SPOTIFYCLIENTID\")\n spotify_secret_key = os.getenv(\"SPOTIFYSECRETKEY\")\n\n logger = logging.getLogger(__name__)\n\n logger.info(\"Fetching songs and artist\")\n\n years = [year for year in range(1960, 2016)]\n urls = create_urls(years)\n songs_df = fetch_and_parse(urls, years)\n\n logger.info(\"Adding 2013 info from data/raw/ (nasty format in the website)\")\n\n # have to use ; for separator as song names contain commas\n songs_df_2013 = pd.read_csv(\n os.path.join(\"data\", \"raw\", \"2013_top_100.csv\"), sep=\";\"\n )\n songs_df = pd.concat([songs_df, songs_df_2013], ignore_index=True)\n\n songs_df[\"lyrics\"] = \"Not searched\"\n songs_df[\"lyrics_source\"] = None\n\n logger.info(\"Saving song and artist data to disk\")\n\n songs_df.to_csv(\n os.path.join(\"data\", \"raw\", \"billboard100_1960-2015.csv\"), index=False, sep=\";\"\n )\n\n logger.info(\"Fetching song lyrics\")\n\n songs_df = pd.read_csv(\n os.path.join(\"data\", \"raw\", \"billboard100_1960-2015.csv\"), sep=\";\"\n )\n\n songs_amount = len(songs_df)\n fetched_songs = 0\n\n for row_index, row in songs_df.iterrows():\n logger.info(f\"Song {row_index + 1} / {songs_amount}\")\n\n if row[\"lyrics\"] == \"Not searched\" or row[\"lyrics\"] == \"Not 
found\":\n\n # slowing down requests so that we cause no trouble\n time.sleep(0.5)\n\n lyric, source = get_lyric_from_apis(\n artist=row[\"artist\"],\n song_title=row[\"song\"],\n rapidapi_key=rapidapi_key,\n geniuslyrics_key=geniuslyrics_key,\n )\n songs_df.iloc[row_index, songs_df.columns.get_loc(\"lyrics\")] = lyric\n songs_df.iloc[row_index, songs_df.columns.get_loc(\"lyrics_source\")] = source\n\n fetched_songs += 1\n print(lyric)\n\n # saving every after every 100 fetched lyrics\n if fetched_songs > 0 and fetched_songs % 100 == 0:\n print(\"Saving progress\")\n songs_df.to_csv(\n os.path.join(\"data\", \"raw\", \"billboard100_1960-2015.csv\"),\n sep=\";\",\n index=False,\n )\n\n songs_df.to_csv(\n os.path.join(\"data\", \"raw\", \"billboard100_1960-2015.csv\"), sep=\";\", index=False\n )\n\n songs_df = pd.read_csv(\n os.path.join(\"data\", \"raw\", \"billboard100_1960-2015.csv\"), sep=\";\"\n )\n\n logger.info(\"Fetching audio features from Spotify API\")\n\n audio_features_df = get_spotify_audiofeatures(\n artists=songs_df[\"artist\"],\n song_titles=songs_df[\"song\"],\n spotify_client_id=spotify_client_id,\n spotify_secret_key=spotify_secret_key,\n )\n songs_df = pd.concat([songs_df, audio_features_df], axis=\"columns\")\n\n logger.info(\"Saving final dataset to disk\")\n\n songs_df.to_csv(\n os.path.join(\"data\", \"raw\", \"billboard100_1960-2015.csv\"), sep=\";\", index=False\n )", "def auto_search_write(self, auto_search_result_df, out_csv):\n self.logger.info('Starting auto search and write')\n all_result_ids = auto_search_result_df['RESULT_ID'].unique()\n\n # validation of df structure\n required_col = ['RESULT_ID', 'SERIES_ID', 'RESULT_SERIES_SEQ_ID', 'QUERY_MOL_ID', 'RESULT_MOL_ID',\n 'RESULT_CONTEXT_ID', 'QUERY_FRAG_ID', 'QUERY_MOL_ID', 'QUERY_CONTEXT_ID', 'RESULT_FRAG_ID',\n 'QUERY_ORDER', 'RESULT_MOL_ACTIVITY']\n\n for col in required_col:\n if col not in auto_search_result_df.columns:\n raise Exception(\"Input data table does not have required columns: %s\" % col)\n\n # catch for empty table\n if auto_search_result_df.shape[0] == 0:\n print (\"No results found\")\n return False\n\n iteration = 1\n return_df = None\n\n for result_id in all_result_ids:\n\n self.logger.info(\"Result, series ID %s from table size %s: \" % (result_id, auto_search_result_df.shape[0]))\n\n sub_series_df = auto_search_result_df[auto_search_result_df['RESULT_ID'] == result_id]\n\n # get the original query mol_id_list in it's original query order\n # it can be mis-ordered due to strict_order=False param on the search method\n mol_id_list = list(zip(sub_series_df['QUERY_MOL_ID'].tolist(), sub_series_df['QUERY_ORDER'].tolist()))\n mol_id_list = sorted(mol_id_list, key=lambda xx: xx[1])\n mol_id_list = [x[0] for x in mol_id_list if x[1] > 0]\n\n self.logger.debug('Merging results to CSV frame for iteration %s and dataframe %s' %\n (iteration, str(sub_series_df.shape)))\n\n if iteration == 1:\n return_df = self.return_scored_series_dataframe(mol_id_list, sub_series_df, return_df, append=False)\n self.logger.debug('First iteration, sized at %s' % str(return_df.shape))\n iteration += 1\n else:\n # as above but append=True\n return_df = self.return_scored_series_dataframe(mol_id_list, sub_series_df, return_df, append=True)\n self.logger.debug('Merge operation, sized at %s' % str(return_df.shape))\n iteration += 1\n\n # return_df = self.enumerate_products(return_df, 'QUERY_MOL_CONTEXT', 'NEW_FRAG_R')\n\n return_df.to_csv(out_csv, index=False, float_format='%.3f') # , header=True)\n 
self.logger.info('Completed write of auto_search results')", "def main():\n right_now = get_current_datetime()\n # print(right_now)\n existing_dict, unique_exist = get_sensor_dict()\n # print(type(existing_dict))\n # print()\n # print(sorted(unique_exist))\n whats_up_list = query_the_api()\n # print(whats_up_list)\n found = filter_json(whats_up_list)\n # print(found)\n lost_new_stillthere(sorted(unique_exist), found, existing_dict, right_now)", "def apicall(self, dasquery, url, api, args, dformat, expire):\n # NOTE: I use helper function since it is 2 step process\n # therefore the expire time stamp will not be changed, since\n # helper function will yield results\n time0 = time.time()\n if api == 'dataset4site_release' or api == 'site4block' or \\\n api == 'site4dataset' or 'files4dataset_runs_site':\n genrows = self.helper(api, args, expire)\n # here I use directly the call to the service which returns\n # proper expire timestamp. Moreover I use HTTP header to look\n # at expires and adjust my expire parameter accordingly\n# NOTE: disable dataset4site, lumi4site since they take too much load\n# see combined.yml\n# if api == 'dataset4site':\n# headers = {'Accept': 'application/json;text/json'}\n# datastream, expire = \\\n# getdata(url, args, headers, expire, system='combined')\n# genrows = parse_data(datastream)\n# if api == 'lumi4dataset':\n# headers = {'Accept': 'application/json;text/json'}\n# data, expire = \\\n# getdata(url, args, headers, expire, system='combined')\n# genrows = json_parser(data, None)\n\n # proceed with standard workflow\n ctime = time.time() - time0\n try:\n if isinstance(url, dict):\n url = \"combined: %s\" % url.values()\n self.write_to_cache(dasquery, expire, url, api, \\\n args, genrows, ctime)\n except Exception as exc:\n print_exc(exc)", "def update_result():\n names = get_tests()[0]\n\n for name in names:\n arctern_file = os.path.join(ARCTERN_RESULT, name + '.csv')\n\n update_quote(arctern_file)\n update_bool(arctern_file)", "def bring_records_to_file_using_threads():\n username = username_entry.get()\n password = password_entry.get()\n day = int(day_entry.get())\n month = int(month_entry.get())\n year = int(year_entry.get())\n today = datetime.date(year, month, day)\n if username in users:\n if password == users[username]:\n db = Database(database_name)\n data = db.fetch_calculations(day, month, year)\n # print(data)\n # print(today)\n save_to_file(today, data)", "def test_fetch_working(suvi_client):\n start = '2019/05/25 00:50'\n end = '2019/05/25 00:52'\n wave = 94 * u.Angstrom\n goes_sat = a.goes.SatelliteNumber.sixteen\n tr = a.Time(start, end)\n qr1 = suvi_client.search(tr, a.Instrument.suvi, a.Wavelength(wave), goes_sat, a.Level(2))\n\n # Mock QueryResponse object\n mock_qr = mock_query_object(suvi_client)\n\n # Compare if two objects have the same attribute\n\n mock_qr = mock_qr[0]\n qr = qr1[0]\n\n assert mock_qr['Source'] == qr['Source']\n assert mock_qr['Provider'] == qr['Provider']\n assert mock_qr['Physobs'] == qr['Physobs']\n assert mock_qr['Instrument'] == qr['Instrument']\n assert mock_qr['url'] == qr['url']\n\n assert qr1['Start Time'] == Time(\"2019-05-25T00:52:00.000\")\n assert qr1['End Time'] == Time(\"2019-05-25T00:56:00.000\")\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n download_list = suvi_client.fetch(qr1, path=tmpdirname)\n assert len(download_list) == len(qr1)", "def test_run_any_search(): # ***Incomplete test\n ##########################\n # Arrange.\n queryfile = \"queryfile\"\n\n ##########################\n # Act.\n 
#x = run_any_search(queryfile)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.", "def getReport(self, *args, **kwargs):\n return_json = dict()\n jdatas = list()\n result, name = is_file(kwargs.get('value'))\n\n if result:\n jdatas = load_file(name)\n if isinstance(jdatas, list):\n jdatas = jdatas\n else:\n jdatas = [jdatas]\n\n kwargs['dump'] = False\n\n else:\n\n if isinstance(kwargs.get('value'), list) and len(kwargs.get('value')) == 1:\n pass\n\n elif isinstance(kwargs.get('value'), six.string_types):\n kwargs['value'] = [kwargs.get('value')]\n\n for hashes_report in kwargs.get('value'):\n if os.path.isfile(hashes_report):\n print('\\nCalculating hash for:', hashes_report)\n hashes_report = hashlib.sha256(open(hashes_report, 'rb').read()).hexdigest()\n\n #ToDo all options\n # https://developers.virustotal.com/v3.0/reference#intelligence-search\n if (kwargs.get('search_intelligence') or 'search_intelligence' in args):\n self.params['query'] = [hashes_report]\n url = self.base.format('intelligence/search')\n else:\n self.params['resource'] = hashes_report\n url = self.base.format('files/{}'.format(hashes_report))\n jdata, response = get_response(url, apikey=self.apikey, params=self.params)\n tmp_url = \"\"\n if jdata.get('links', {}).get('next', \"\"):\n tmp_url = jdata['links']['next']\n elif isinstance(jdata.get('data'), list) and 'next' in jdata.get('data', list())[0].get('links', dict()):\n tmp_url = jdata['data'][0]['links']['next']\n\n if kwargs.get('search_intelligence_limit', 1) > 1:\n info = self.__aux_search(tmp_url, kwargs['search_intelligence_limit'])\n jdata['data'] += info\n\n if kwargs.get('return_raw'):\n return jdata\n\n jdatas.append(jdata)\n\n if isinstance(jdatas, list) and jdatas == []:\n if kwargs.get('return_raw'):\n pass\n else:\n print('Nothing found')\n return\n\n if not isinstance(jdatas, list):\n jdatas = [jdatas]\n\n for jdata in jdatas:\n if isinstance(jdata, dict):\n if _check_error(jdata):\n continue\n\n if jdata.get('data'):\n\n if kwargs.get('dump'):\n jsondump(jdata, name)\n\n if kwargs.get('not_exit'):\n return False\n\n if kwargs.get('search_intelligence') or 'search_intelligence' in args:\n\n if kwargs.get('return_json') and (kwargs.get('hashes') or 'hashes' in args):\n return_json['hashes'] = [block['attributes']['sha256'] for block in jdata.get('data', [])]\n else:\n print('[+] Matched hash(es):')\n for block in filter(None, jdata['data']):\n # ToDo should check type instead of sha256\n if \"sha256\" not in block['attributes'] and block['type'] != \"domain\":\n continue\n\n if \"sha256\" in block['attributes']:\n try:\n print('{} - FS:{} - LS:{}'.format(block['attributes'].get('sha256', \"\"), \\\n datetime_from_timestamp(block['attributes']['first_submission_date']), \\\n datetime_from_timestamp(block['attributes']['last_analysis_date']))\n )\n if kwargs.get('verbose') or kwargs.get('allinfo'):\n self._parse_aux(block['attributes'], **kwargs)\n print(\"\\n\\n\")\n except Exception:\n print(block)\n elif \"domain\" == block['type']:\n print(block['id'])\n\n if kwargs.get('download'):\n kwargs.update({'value': [block['attributes']['sha256'] for block in jdata.get('data', [])], 'download':'file'})\n self.download(**kwargs)\n else:\n if jdata.get('data', {}).get('attributes', {}):\n self._parse_aux(jdata['data']['attributes'], **kwargs)\n if kwargs.get('allinfo'):\n pass\n #ToDo remove\n \"\"\"\n if kwargs.get('verbose'):\n #print(jdata)\n basic_file_info_list = (\n 'md5',\n 'sha1',\n 'sha256',\n 'ssdeep',\n 
'scan_date',\n 'first_seen',\n 'last_seen',\n 'times_submitted',\n 'scan_id',\n 'harmless_votes',\n 'community_reputation',\n 'malicious_votes',\n )\n\n self.simple_print(jdata, basic_file_info_list)\n self.list_print(jdata, ['submission_names'])\n\n if jdata.get('ITW_urls') and ((kwargs.get('ITW_urls') or 'ITW_urls' in args) or kwargs.get('verbose')):\n if kwargs.get('return_json'):\n return_json['ITW_urls'] = jdata.get('ITW_urls')\n else:\n self.list_print(jdata, ['ITW_urls'])\n\n if kwargs.get('verbose'):\n file_info_list = (\n 'type',\n 'size',\n 'tags',\n 'unique_sources',\n )\n self.simple_print(jdata, file_info_list)\n\n simple_list = (\n 'magic',\n 'first_seen_itw',\n 'trendmicro-housecall-heuristic',\n 'deepguard',\n 'unique_sources',\n 'trid',\n 'pe-timestamp'\n )\n\n list_list = (\n 'compressed_parents',\n )\n\n dict_keys = (\n 'pe-overlay',\n 'pe-resource-langs',\n 'pe-resource-types',\n 'pe-resource-list',\n )\n\n dict_list_keys = (\n 'sections',\n )\n\n if kwargs.get('verbose'):\n self.simple_print(jdata['additional_info'], simple_list)\n self.list_print(jdata['additional_info'], list_list)\n self.dict_print(jdata['additional_info'], dict_keys)\n self.dict_list_print(jdata['additional_info'], dict_list_keys)\n\n if jdata['additional_info'].get('rombioscheck') and ((kwargs.get('rombioscheck_info') or 'rombioscheck_info' in args) or kwargs.get('verbose')):\n if kwargs.get('return_json'):\n return_json['rombioscheck'] = jdata['additional_info'].get('rombioscheck')\n else:\n print('\\n[+] RomBiosCheck:')\n print('\\t')\n\n # this removes code duplication\n simple_list = (\n 'contained_hash',\n 'executable_file',\n 'firmware_volume_count',\n 'max_tree_level', 'format',\n 'raw_objects',\n 'raw_sections',\n 'section_count',\n 'vhash',\n 'win32_file',\n )\n\n list_keys = (\n 'acpi_tables',\n 'nvar_variable_names',\n 'tags'\n )\n\n double_list = (\n 'apple_data',\n 'manufacturer_candidates'\n )\n\n self.simple_print(jdata['additional_info']['rombioscheck'], simple_list)\n self.list_print(jdata['additional_info']['rombioscheck'], list_keys)\n\n for key in double_list:\n if jdata['additional_info']['rombioscheck'].get(key) and kwargs.get('verbose'):\n self.print_key(key)\n for block in jdata['additional_info']['rombioscheck'].get(key):\n print('\\t', block[0], ':', block[1])\n\n simple_dict = (\n 'smbios_data',\n 'biosinformation',\n 'systeminformation'\n )\n\n for key in simple_dict:\n if jdata['additional_info']['rombioscheck'].get(key) and kwargs.get('verbose'):\n self.print_key(key)\n plist = [[]]\n for sub_key, value in jdata['additional_info']['rombioscheck'].get(key).items():\n if isinstance(value, list):\n value = '\\n'.join(value)\n plist.append([sub_key, str(value).replace(',', '\\n')])\n\n if plist != [[]]:\n pretty_print_special(plist, ['Key', 'Value'], False, ['r', 'l'], kwargs.get('email_template'))\n del plist\n\n dict_keys = (\n 'option_roms',\n 'certs'\n )\n\n for key in dict_keys:\n if jdata['additional_info']['rombioscheck'].get(key) and kwargs.get('verbose'):\n self.print_key(key)\n\n for block in jdata['additional_info']['rombioscheck'].get(key, {}):\n plist = [[]]\n for key, value in block.items():\n if isinstance(value, list):\n value = '\\n'.join(value)\n plist.append([key, str(value).replace(',', '\\n')])\n\n if plist != [[]]:\n pretty_print_special(plist, ['Key', 'Value'], False, ['r', 'l'], kwargs.get('email_template'))\n del plist\n\n complex_dict = (\n 'win32children',\n 'children'\n )\n\n for key in complex_dict:\n if 
jdata['additional_info']['rombioscheck'].get(key) and kwargs.get('verbose'):\n self.print_key(key)\n\n for cert in jdata['additional_info']['rombioscheck'].get(key, {}):\n plist = [[]]\n for key, value in cert.items():\n if key == 'detection_ratio':\n value = '/'.join([str(num) for num in value])\n if key in ('tags', 'imports'):\n value = '\\n'.join(value)\n if key == 'certs':\n\n certs = list()\n for certificates in value:\n for sub_key, sub_value in certificates.items():\n if sub_key == 'subject':\n certs.append('{0}: {1}\\n\\n----------------'.format(sub_key, sub_value))\n else:\n certs.append('{0}: {1}'.format(sub_key, sub_value))\n value = '\\n'.join(certs)\n plist.append([key, value])\n\n if plist != [[]]:\n pretty_print_special(plist, ['Key', 'Value'], [20, 64], ['r', 'l'], kwargs.get('email_template'))\n\n del plist\n\n if jdata['additional_info'].get('rombios_generator') and ((kwargs.get('rombios_generator_info') or 'rombios_generator_info' in args) or kwargs.get('verbose')):\n\n if kwargs.get('return_json'):\n return_json['rombios_generator'] = jdata['additional_info'].get('rombios_generator')\n else:\n print('\\n[+] RomBios Generator:')\n dict_keys = (\n 'source',\n )\n\n for key in dict_keys:\n if jdata['additional_info']['rombios_generator'].get(key) and kwargs.get('verbose'):\n self.print_key(key)\n plist = [[]]\n for key, value in jdata['additional_info']['rombios_generator'].get(key, {}).items():\n if isinstance(value, list):\n value = '\\n'.join(value)\n plist.append([key, str(value).replace(',', '\\n')])\n\n if plist != [[]]:\n pretty_print_special(plist, ['Key', 'Value'], False, ['r', 'l'], kwargs.get('email_template'))\n\n del plist\n\n\n if jdata['additional_info']['rombios_generator'].get('diff') and kwargs.get('verbose'):\n pass\n\n\n if jdata['additional_info'].get('debcheck') and ((kwargs.get('debcheck_info') or 'debcheck_info' in args) or kwargs.get('verbose')):\n if kwargs.get('return_json'):\n return_json['debcheck'] = jdata['additional_info'].get('debcheck')\n else:\n print('\\n[+] DebCheck')\n simple_list = (\n 'vhash',\n 'tags'\n\n )\n\n dict_list = (\n 'structural_metadata',\n 'control_metadata',\n 'control_scripts'\n )\n\n complicated_dict_list = (\n 'children',\n )\n\n for key in simple_list:\n if jdata['additional_info']['debcheck'].get(key):\n self.print_key(key)\n if isinstance(jdata['additional_info']['debcheck'].get(key), list):\n print('\\t', '\\n\\t'.join(jdata['additional_info']['debcheck'].get(key)))\n elif isinstance(jdata['additional_info']['debcheck'].get(key), six.string_types):\n print('\\t', jdata['additional_info']['debcheck'].get(key))\n\n for key in dict_list:\n if jdata['additional_info']['debcheck'].get(key):\n self.print_key(key)\n plist = [[]]\n for sub_key, value in jdata['additional_info']['debcheck'][key].items():\n plist.append([sub_key, value])\n\n if plist != [[]]:\n pretty_print_special(plist, ['Key', 'Value'], False, ['r', 'l'], kwargs.get('email_template'))\n\n del plist\n\n for key in complicated_dict_list:\n if jdata['additional_info']['debcheck'].get(key):\n self.print_key(key)\n for block in jdata['additional_info']['debcheck'].get(key, {}):\n for sub_key, sub_value in block.items():\n if sub_key == 'detection_ratio':\n sub_value = '/'.join([str(ssub) for ssub in sub_value])\n print('\\t', sub_key, ':', sub_value)\n print('\\n')\n\n if jdata['additional_info'].get('androguard') and ((kwargs.get('androidguard_info') or 'androidguard_info' in args) or kwargs.get('verbose')):\n if kwargs.get('return_json'):\n 
return_json['androguard'] = jdata['additional_info'].get('androguard')\n else:\n print('\\n[+] AndroidGuard')\n simple_list = (\n 'AndroguardVersion',\n 'AndroidApplication',\n 'AndroidApplicationError',\n 'AndroidApplicationInfo',\n 'AndroidVersionCode',\n 'AndroidVersionName',\n 'VTAndroidInfo',\n 'Main Activity',\n 'MinSdkVersion',\n 'TargetSdkVersion',\n 'Package',\n 'SourceFile',\n )\n list_list = (\n 'Libraries',\n 'Activities',\n 'StringsInformation'\n )\n\n dict_list = (\n 'Permissions',\n 'RiskIndicator',\n )\n\n self.simple_print(jdata['additional_info']['androguard'], simple_list)\n self.list_print(jdata['additional_info']['androguard'], list_list)\n self.dict_print(jdata['additional_info']['androguard'], dict_list)\n\n #certificates info\n cert_list = (\n 'Subject',\n 'validto',\n 'serialnumber',\n 'thumbprint',\n 'validfrom',\n 'Issuer'\n )\n\n if jdata['additional_info']['androguard'].get('certificate'):\n for key in cert_list:\n if jdata['additional_info']['androguard']['certificate'].get(key):\n self.print_key(key)\n if key in ('Subject', 'Issuer'):\n for sub_key, sub_value in jdata['additional_info']['androguard']['certificate'].get(key).items():\n print('\\t', sub_key, ':', sub_value)\n else:\n print('\\t', jdata['additional_info']['androguard']['certificate'].get(key))\n\n if jdata['additional_info']['androguard'].get('intent-filters'):\n print('\\n[+]', 'Intent-filters')\n for key in jdata['additional_info']['androguard'].get('intent-filters'):\n print('\\t', key)\n for sub_key in jdata['additional_info']['androguard']['intent-filters'].get(key, {}):\n print('\\n\\t\\t', sub_key)\n for ssub_key in jdata['additional_info']['androguard']['intent-filters'][key].get(sub_key):\n print('\\n\\t\\t\\t', ssub_key)\n print('\\n\\t\\t\\t\\t', '\\n\\t\\t\\t\\t'.join(jdata['additional_info']['androguard']['intent-filters'][key][sub_key].get(ssub_key)))\n\n if jdata.get('email_parents') and kwargs.get('verbose'):\n print('\\n[+] Email parents:')\n for email in jdata['email_parents']:\n print('\\t{email}'.format(email=email))\n\n if jdata['additional_info'].get('referers') and kwargs.get('verbose'):\n print('\\n[+] Referers:')\n print('\\t', '\\n\\t'.join(jdata['additional_info']['referers']))\n\n # IDS, splited to be easily getted throw imported vt as library\n ids = (\n 'suricata',\n 'snort'\n )\n for key in ids:\n if jdata['additional_info'].get(key) and (kwargs.get(key) or key in args) or kwargs.get('verbose'):\n if kwargs.get('return_json'):\n return_json[key] = jdata['additional_info'].get(key)\n else:\n if jdata['additional_info'].get(key, ''):\n self.print_key(key)\n for rule in jdata['additional_info'].get(key):\n print('\\nRule:', rule)\n print('\\tAlert\\n\\t\\t', jdata['additional_info'][key][rule]['alert'])\n print('\\tClassification\\n\\t\\t', jdata['additional_info'][key][rule]['classification'])\n print('\\tDescription:')\n for desc in jdata['additional_info'][key][rule]['destinations']:\n print('\\t\\t', desc)\n\n if jdata['additional_info'].get('traffic_inspection') and (kwargs.get('traffic_inspection') or 'traffic_inspection' in args) or kwargs.get('verbose'):\n if kwargs.get('return_json'):\n return_json['traffic_inspection'] = jdata['additional_info'].get('traffic_inspection')\n else:\n if jdata['additional_info'].get('traffic_inspection'):\n print('\\n[+] Traffic inspection')\n for proto in jdata['additional_info'].get('traffic_inspection'):\n print('\\tProtocol:', proto)\n for block in jdata['additional_info'].get('traffic_inspection')[proto]:\n plist = [[]]\n 
for key, value in block.items():\n plist.append([key, str(value)])\n\n if plist != [[]]:\n pretty_print_special(plist, ['Key', 'Value'], False, ['r', 'l'], kwargs.get('email_template'))\n\n del plist\n\n if jdata['additional_info'].get('wireshark') and (kwargs.get('wireshark_info') or 'wireshark_info' in args) or kwargs.get('verbose'):\n if kwargs.get('return_json'):\n return_json['wireshark'] = jdata['additional_info'].get('wireshark')\n else:\n if jdata['additional_info'].get('wireshark', {}):\n print('\\n[+] Wireshark:')\n if jdata['additional_info'].get('wireshark', {}).get('pcap'):\n plist = [[]]\n for key, value in jdata['additional_info'].get('wireshark', {}).get('pcap').items():\n plist.append([key, value])\n\n if plist != [[]]:\n pretty_print_special(plist, ['Key', 'Value'], False, ['c', 'l'], kwargs.get('email_template'))\n\n del plist\n\n if jdata['additional_info'].get('wireshark', {}).get('dns'):\n print('\\n[+] DNS')\n plist = [[]]\n key_s, value_s = get_sizes(jdata['additional_info'].get('wireshark'))\n for domain in jdata['additional_info'].get('wireshark').get('dns'):\n plist.append([domain[0], '\\n\\t'.join(domain[1])])\n\n if plist != [[]]:\n pretty_print_special(plist, ['Domain', 'IP(s)'], False, ['r', 'l'], kwargs.get('email_template'))\n\n del plist\n\n if jdata['additional_info'].get('behaviour-v1'):\n\n\n dict_keys = (\n 'mutex',\n )\n\n if kwargs.get('verbose'):\n self.dict_list_print(jdata['additional_info']['behaviour-v1'], dict_keys)\n if jdata['additional_info']['behaviour-v1'].get('tags'):\n print('\\n[+] Tags:')\n for tag in jdata['additional_info']['behaviour-v1'].get('tags'):\n print('\\t', tag)\n\n if jdata['additional_info']['behaviour-v1'].get('dropped_files') and kwargs.get('verbose'):\n print('\\n[+] Dropped files:')\n\n plist = [[]]\n\n for files in jdata['additional_info']['behaviour-v1'].get('dropped_files'):\n plist.append([files.get('hash'), files.get('filename')])\n\n if plist != [[]]:\n pretty_print_special(plist, ['Hash(sha256?)', 'Filename'], [64, 50], ['c', 'l'], kwargs.get('email_template'))\n\n del plist\n\n\n if jdata['additional_info']['behaviour-v1'].get('network', {}) and kwargs.get('verbose'):\n print('\\n[+] Network')\n network_list = (\n 'tcp',\n 'udp'\n )\n for key in network_list:\n if jdata['additional_info']['behaviour-v1']['network'].get(key):\n plist = [[]]\n [plist.append([ip]) for ip in jdata['additional_info']['behaviour-v1']['network'].get(key)]\n pretty_print_special(plist, [key.upper()], False, False, kwargs.get('email_template'))\n\n\n # ToDo hosts\n\n if jdata['additional_info']['behaviour-v1']['network'].get('dns') and kwargs.get('verbose'):\n print('\\n[+] DNS:')\n plist = [[]]\n for block in jdata['additional_info']['behaviour-v1']['network'].get('dns'):\n plist.append([block.get('ip'), block.get('hostname')])\n pretty_print_special(plist, ['Ip', 'Hostname'], False, False, kwargs.get('email_template'))\n\n #if jdata['additional_info']['behaviour-v1']['network'].get('http'):\n # print '\\n[+] HTTP:', jdata['additional_info']['behaviour-v1']['network'].get('http')\n\n if jdata['additional_info']['behaviour-v1'].get('codesign') and kwargs.get('verbose'):\n print('\\n[+] Codesign:\\n\\t',jdata['additional_info']['behaviour-v1'].get('codesign').replace('\\n', '\\n\\t'))\n\n if jdata['additional_info']['behaviour-v1'].get('process') and kwargs.get('verbose'):\n dict_keys = (\n 'injected',\n 'shellcmds',\n 'terminated',\n 'tree'\n )\n print('\\n[+] Process')\n 
self.dict_list_print(jdata['additional_info']['behaviour-v1']['process'], dict_keys)\n\n if jdata['additional_info']['behaviour-v1'].get('registry') and kwargs.get('verbose'):\n dict_keys = (\n 'deleted',\n 'set'\n )\n #print '\\n[+] Registry'\n #self.dict_list_print(jdata['additional_info']['behaviour-v1']['registry'], dict_keys)\n\n if jdata['additional_info']['behaviour-v1'].get('windows') and kwargs.get('verbose'):\n dict_keys = (\n 'windows',\n 'runtime-dlls',\n 'hooking',\n 'filesystem'\n )\n self.dict_list_print(jdata['additional_info']['behaviour-v1'], dict_keys)\n\n if kwargs.get('verbose'):\n simple_list = (\n 'knockknock',\n 'tun_time',\n 'internal_tags',\n 'num_screenshots',\n 'version'\n )\n self.simple_print(jdata['additional_info']['behaviour-v1'], simple_list)\n\n if jdata['additional_info']['behaviour-v1'].get('signals') and kwargs.get('verbose'):\n print('\\n[+] Signals:')\n\n plist = [[]]\n\n for signals in jdata['additional_info']['behaviour-v1'].get('signals'):\n plist.append(\n [signals.get('cmd'), signals.get('target'), signals.get('signo'), signals.get('pid'), signals.get('walltimestamp'), signals.get('execname')])\n\n if plist != [[]]:\n pretty_print_special(plist, ['CMD', 'Target', 'Signo', 'PID', 'WallTimeStamp', 'ExecName'], False, False, kwargs.get('email_template'))\n\n del plist\n\n if jdata['additional_info']['behaviour-v1'].get('filesystem') and kwargs.get('verbose'):\n print('\\n[+] Filesystem:')\n if jdata['additional_info']['behaviour-v1']['filesystem'].get('opened'):\n\n plist = [[]]\n\n for fs_open in jdata['additional_info']['behaviour-v1']['filesystem'].get('opened'):\n plist.append(\n [fs_open.get('success'), fs_open.get('execname'), fs_open.get('path')])\n\n if plist != [[]]:\n pretty_print_special(plist, ['Success', 'ExecName', 'Path'], [8, 20, 80], ['c', 'c', 'l'], kwargs.get('email_template'))\n\n del plist\n if jdata['additional_info']['behaviour-v1'].get('output'):\n print('\\n[+] Output:', jdata['additional_info']['behaviour-v1'].get('output'))\n\n if jdata['additional_info'].get('sigcheck') and kwargs.get('verbose'):\n\n print('\\n[+] PE signature block:')\n plist = [[]]\n for sig in jdata['additional_info']['sigcheck']:\n if isinstance(jdata['additional_info']['sigcheck'][sig], list):\n self.print_key(sig)\n for data in jdata['additional_info']['sigcheck'][sig]:\n sub_plist = [[]]\n for key in data.keys():\n sub_plist.append([key, data[key]])\n pretty_print_special(sub_plist, ['Name', 'Value'], False, False, kwargs.get('email_template'))\n del sub_plist\n else:\n plist.append(\n [sig, jdata['additional_info']['sigcheck'][sig].encode('utf-8')] # texttable unicode fail\n )\n\n pretty_print_special(plist, ['Name', 'Value'], False, False, kwargs.get('email_template'))\n del plist\n\n if jdata['additional_info'].get('exiftool') and kwargs.get('verbose'):\n self.dict_print(jdata['additional_info'], ['exiftool'])\n\n if jdata['additional_info'].get('imports') and kwargs.get('verbose'):\n self.dict_print(jdata['additional_info'], ['imports'])\n\n if jdata['additional_info'].get('dmgcheck') and kwargs.get('verbose'):\n print('\\n[+] dmgCheck:')\n\n if jdata['additional_info']['dmgcheck'].get('plst_keys'):\n print('\\n[+] plst_keys:')\n for key in jdata['additional_info']['dmgcheck']['plst_keys']:\n print('\\t{}'.format(key))\n\n if jdata['additional_info']['dmgcheck'].get('plst'):\n plist = [[]]\n\n for plst in jdata['additional_info']['dmgcheck']['plst']:\n plist.append(\n [plst.get('attributes'), plst.get('name')])\n\n if plist != [[]]:\n 
pretty_print_special(plist, ['Attributes', 'Name'], False, False, kwargs.get('email_template'))\n del plist\n\n dmgcheck_list = (\n 'xml_offset',\n 'xml_length',\n 'data_fork_offset',\n 'running_data_fork_offset',\n 'rsrc_fork_offset',\n )\n\n if jdata['additional_info']['dmgcheck'].get('resourcefork_keys'):\n print('\\n[+] resourcefork keys:')\n for key in jdata['additional_info']['dmgcheck']['resourcefork_keys']:\n print('\\t', key)\n\n if jdata['additional_info']['dmgcheck'].get('blkx'):\n print('\\n[+] blkx:')\n plist = [[]]\n\n for blkx in jdata['additional_info']['dmgcheck']['blkx']:\n plist.append(\n [blkx.get('attributes'), blkx.get('name')])\n\n if plist != [[]]:\n pretty_print_special(plist, ['Attributes', 'Name'], False, False, kwargs.get('email_template'))\n\n del plist\n\n if jdata['additional_info']['dmgcheck'].get('iso') and jdata['additional_info']['dmgcheck']['iso'].get('volume_data', {}):\n print('\\n[+] Volume data')\n plist = [[]]\n for key, value in jdata['additional_info']['dmgcheck']['iso'].get('volume_data', {}).items():\n plist.append([key, value])\n\n if plist != [[]]:\n pretty_print_special(plist, ['Key', 'Value'], [22, 80], ['r', 'l', ], kwargs.get('email_template'))\n\n del plist\n\n hfs_dict_list = (\n 'executables',\n 'bundles',\n 'main_executable',\n )\n\n # ToDo\n # dmgcheck.iso.unreadable_files\n\n for pattern in ('hfs', 'iso'):\n for key in hfs_dict_list:\n if jdata['additional_info']['dmgcheck'].get(pattern):\n if jdata['additional_info']['dmgcheck'][pattern].get(key):\n self.print_key(key)\n plist = [[]]\n\n if key in ('main_executable', 'volume_data'):\n jdata['additional_info']['dmgcheck'][pattern][key] = [jdata['additional_info']['dmgcheck'][pattern][key]]\n\n for executables in jdata['additional_info']['dmgcheck'][pattern].get(key, ''):\n detection = executables.get('detection_ratio')\n detection = '{0}:{1}'.format(detection[0], detection[1])\n plist.append(\n [detection, executables.get('id'), executables.get('size', '-'), executables.get('sha256'), executables.get('path')])\n if plist != [[]]:\n pretty_print_special(plist, ['Detection', 'Id', 'Size', 'sha256', 'Path'], [10, 10, 10, 64, 50], ['c', 'c', 'c', 'c', 'l'], kwargs.get('email_template'))\n\n del plist\n\n hfs_list = (\n 'num_files',\n 'unreadable_files',\n 'dmg'\n )\n\n for key in hfs_list:\n if jdata['additional_info']['dmgcheck'][pattern].get(key):\n self.print_key(key)\n print('\\t', jdata['additional_info']['dmgcheck'][pattern][key])\n\n if jdata['additional_info']['dmgcheck'][pattern].get('info_plist', ''):\n print('\\n[+] Info plist: ')\n for key, value in jdata['additional_info']['dmgcheck'][pattern]['info_plist'].items():\n if isinstance(value, dict):\n print('\\t', key, ':')\n for subkey, subvalue in value.items():\n print('\\t\\t', subkey, ':', subvalue)\n else:\n print('\\t', key, ':', value)\n\n if jdata['additional_info'].get('compressedview') and ((kwargs.get('compressedview') or 'compressedview' in args) or kwargs.get('verbose')):\n if kwargs.get('return_json'):\n return_json['compressedview'] = jdata['additional_info']['compressedview']['compressedview']\n\n else:\n print('\\n[+] Compressed view:')\n if jdata['additional_info']['compressedview'].get('children') and ((kwargs.get('children') or 'children' in args) or kwargs.get('verbose')):\n if kwargs.get('return_json'):\n return_json['compresedview_children'] = jdata['additional_info']['compressedview']['children']\n else:\n compressedview_list = ('datetime', 'detection_ratio', 'filename', 'sha256', 'size', 'type')\n for 
child in jdata['additional_info']['compressedview'].get('children'):\n print('\\n')\n for key in compressedview_list:\n if child.get(key):\n self.print_key(key, indent='', separator='')\n if key == 'detection_ratio':\n print('\\t{0}/{1}'.format(child[key][0], child[key][1]))\n elif key == 'filename':\n try:\n print('\\t', child[key])\n except:\n try:\n print('\\t', child[key].encode('utf-8'))\n except:\n print('\\t[-]Name decode error')\n else:\n print('\\t', child.get(key))\n\n if jdata['additional_info']['compressedview'].get('extensions'):\n print('\\n[+] Extensions:')\n for ext in jdata['additional_info']['compressedview']['extensions']:\n print('\\t', ext, jdata['additional_info']['compressedview']['extensions'][ext])\n\n if jdata['additional_info']['compressedview'].get('file_types'):\n print('\\n[+] FileTypes')\n for file_types in jdata['additional_info']['compressedview']['file_types']:\n print('\\t' ,file_types, jdata['additional_info']['compressedview']['file_types'][file_types])\n\n if jdata['additional_info']['compressedview'].get('tags'):\n print('\\n[+] Tags:')\n for tag in jdata['additional_info']['compressedview']['tags']:\n print('\\t', tag)\n\n compressedview_add_list = (\n 'lowest_datetime',\n 'highest_datetime',\n 'num_children',\n 'type',\n 'uncompressed_size',\n 'vhash'\n )\n\n self.simple_print(jdata['additional_info']['compressedview'], compressedview_add_list)\n\n if jdata['additional_info'].get('detailed_email_parents') and ((kwargs.get('detailed_email_parents') or 'detailed_email_parents' in args) or kwargs.get('verbose')):\n\n if kwargs.get('return_json') and (kwargs.get('original-email') or 'original-email' in args):\n return_json['detailed_email_parents'] = jdata['additional_info']['detailed_email_parents']\n else:\n if not kwargs.get('return_json'):\n print('\\nDetailed email parents:')\n for email in jdata['additional_info']['detailed_email_parents']:\n if kwargs.get('email_original'):\n kwargs['value'] = [email.get('message_id')]\n parsed = self.parse_email(**kwargs)\n if parsed:\n return_json.setdefault('emails', [])\n if kwargs.get('return_json'):\n return_json['emails'].append(parsed)\n\n else:\n email_list = (\n 'subject',\n 'sender',\n 'receiver',\n 'message_id',\n\n )\n for key in email_list:\n if email.get(key):\n self.print_key(key, indent='\\n', separator='')\n print('\\t', email[key])\n\n if email.get('message'):\n print('\\nMessage:')\n if email['message'] is not None:\n for line in email['message'].split(b'\\n'):\n print(line.strip())\n\n if jdata.get('total') and kwargs.get('verbose'):\n print('\\n[+] Detections:\\n\\t{positives}/{total} Positives/Total\\n'.format(positives=jdata['positives'], total=jdata['total']))\n\n if jdata.get('scans') and kwargs.get('verbose'):\n\n plist = [[]]\n\n for x in sorted(jdata.get('scans')):\n if jdata['scans'][x].get('detected'):\n plist.append([x,\n 'True',\n jdata['scans'][x]['result'] if jdata['scans'][x]['result'] else ' -- ',\n jdata['scans'][x]['version'] if 'version' in jdata['scans'][x] and jdata['scans'][x]['version'] else ' -- ',\n jdata['scans'][x]['update'] if 'update' in jdata['scans'][x] and jdata['scans'][x]['update'] else ' -- '\n ])\n\n av_size, result_size, version = get_adequate_table_sizes(jdata['scans'])\n\n if version == 9:\n version_align = 'c'\n\n else:\n version_align = 'l'\n\n if plist != [[]]:\n pretty_print_special(plist,\n ['Vendor name', 'Detected', 'Result', 'Version', 'Last Update'],\n [av_size, 9, result_size, version, 12],\n ['r', 'c', 'l', version_align, 'c'],\n 
kwargs.get('email_template')\n )\n\n del plist\n\n if jdata.get('permalink') and kwargs.get('verbose'):\n print('\\nPermanent link : {permalink}\\n'.format(permalink=jdata['permalink']))\n \"\"\"\n else:\n kwargs.update({'url_report':False})\n result = parse_report(jdata, **kwargs)\n\n if kwargs.get('return_json'):\n return return_json\n else:\n return result", "def main(api_key, output_file):\n tmdb.API_KEY = api_key\n \n # find 100 valid movie_id\n i=1\n movie_id=[]\n while (len(movie_id)<100): # keep trying until it works\n try: # only things that might cause HTTP error belong in the \"try\"\n movie = tmdb.Movies(i) # this is one request\n response=movie.info()\n reviews = movie.reviews()\n review_list = reviews['results']\n if (review_list!=[]):\n movie_id.append(i) # only append those with reviews\n i=i+1\n except requests.HTTPError:\n print(\"invalid movie id\")\n i=i+1\n time.sleep(0.3) # now we have a list of valid movie id\n \n f= open(output_file,\"w+\",encoding=\"utf-8\")\n i=0\n for i in range(0,len(movie_id)):\n while True: # keep trying until it works\n try: # only things that might cause HTTP error belong in the \"try\"\n movie = tmdb.Movies(movie_id[i]) # this is one request\n reviews = movie.reviews() # one request\n break\n except requests.HTTPError:\n print(\"HTTPError, waiting 3 seconds\")\n time.sleep(0.3)\n review_list = reviews['results']\n for review_dict in review_list:\n auth = review_dict['author']\n text = review_dict['content']\n text=re.sub(r'\\n',' ',text)\n text=re.sub(r'\\t',' ',text)\n text=re.sub(r'\\r',' ',text)\n f.write(f\"{movie_id[i]}\\t{auth}\\t{text}\\n\")\n i=i+1\n f.close()", "def calc_okapi_tf(self, query, query_no, avg_doc_length):\n okapi_tf_scores = {}\n f_okapi_tf = open(\"Results/okapi_tf_output.txt\",'a')\n query_array = []\n ic = client.IndicesClient(self.es)\n\n analyzed_result = ic.analyze(index=\"ap_dataset\",analyzer=\"my_english\",body=query)\n token_length = len(analyzed_result['tokens'])\n for i in range(token_length):\n query_array.append(str(analyzed_result['tokens'][i]['token']))\n\n query_body = {\"query\":\n {\"function_score\": {\"query\": {\"match\": {\"text\": query}},\n \"functions\": [\n {\"script_score\":\n {\"script\": \"getOkapiTF\", \"lang\": \"groovy\",\n \"params\": {\"query\": query_array, \"field\": \"text\",\n \"avgLength\": avg_doc_length}}}],\n \"boost_mode\": \"replace\"}}, \"fields\":[\"stream_id\"]}\n\n okapi_result = self.es.search(index=\"ap_dataset\", doc_type=\"document\", size=self.search_size,\n analyzer=\"my_english\", body=query_body)\n result_size = len(okapi_result['hits']['hits'])\n\n rank = 1\n for i in range(result_size):\n doc_id = str(okapi_result['hits']['hits'][i]['_id'])\n score = okapi_result['hits']['hits'][i]['_score']\n if score != 0:\n f_okapi_tf.write(query_no + \" Q0 \" + doc_id + \" \" + str(rank) + \" \" + str(score) + \" Exp\\n\")\n okapi_tf_scores[doc_id] = score\n rank += 1\n f_okapi_tf.close()\n return okapi_tf_scores", "def process_results(result, title=\"\", doWrite=True, query=\"\"):\n\n\tif not result or not result.returns_rows:\n\t\tprint \"-> 0 records returned\"\n\t\tprint result\n\t\treturn\n\n\tdoPrint = True\n\n\t# get column names\n\tcols = result.keys()\n\n\t# build template format string\n\ttemplate = \"\"\n\tfor i in range(len(cols)):\n\t\ttemplate += \"{%s:40}\" % i\n\ttemplate = template.replace(\"}{\", \"}|{\")\n\n\t# open output file\n\tif doWrite:\n\t\tresults_dir = DATA_PATH + \"query_results/\"\n\t\tif not 
os.path.exists(results_dir):\n\t\t\tos.makedirs(results_dir)\n\n\t\tnow = datetime.now().strftime(\"%Y_%m_%d__%I_%M_%p\")\n\t\tfilename = results_dir + \"query_results_\" + now\n\t\t\n\t\ttxt = open(filename+\".txt\", 'w')\n\t\tcsv = open(filename+\".csv\", 'w')\n\n\t\t# print query\n\t\ttxt.write(query + \"\\n\")\n\t\ttxt.write(\"=\" * len(query) + \"\\n\")\n\t\tcsv.write(query + \"\\n\")\n\n\t\t# print header\n\t\ttxt.write(template.format(*tuple(cols)) + \"\\n\") # header\n\t\ttxt.write(\"-\"*41*len(cols) + \"\\n\")\n\t\tcsv.write('|'.join(cols) + \"\\n\")\n\n\t# print records\n\tnumRecs = 0\n\tfor row in result:\n\t\t# ask user if they want ot stop printing after 100 records\n\t\tif numRecs == 10:\n\t\t\ti = raw_input(\"10 records printed. Continue printing records? (y/N) \")\n\t\t\tif i.lower() not in [\"y\", \"yes\"]:\n\t\t\t\tdoPrint = False\n\t\t\n\t\tif doPrint:\n\t\t\tscreen_write_row(row, title)\n\n\t\tif doWrite:\n\t\t\ttxt_write_row(txt, template, row)\n\t\t\tcsv_write_row(csv, row)\n\n\t\t# count records returned\n\t\tnumRecs += 1\n\n\t# show number of records returned\n\tprint \"-> %s records returned\" % numRecs\n\tif doWrite:\n\t\ttxt.write(\t\"-> %s records returned\" % numRecs)\n\n\tif doWrite:\n\t\ttxt.flush()\n\t\ttxt.close()", "def fileCheck(path):\n print('[+] Checking For File patching ')\n for url in check_files:\n try:\n #File Rereive\n data = query(url)\n file_name = url.split(\"/\")[-1]\n _,tmp_file = tempfile.mkstemp(prefix=\"exitmap_%s_\" % file_name)\n with open(tmp_file, \"wb\") as fd:\n fd.write(data)\n for i in check_files_patch_results:\n if str(i.url) == str(url):\n if str(i.filehash) != str(sha512_file(tmp_file)):\n print('[+] ALERT File Patch FOUND !')\n print(' | exitnode : %s' % str(i.exitnode) )\n print(' |_________> url: %s' % str(i.url) )\n print(' |_________> filePath: %s' % str(i.filepath) )\n print(' |_________> fileHash: %s' % str(i.filehash) )\n #check_files_patch_results.append( File_Check_Results(url, file_name, tmp_file, path, sha512_file(tmp_file)) )\n else :\n print('[+] File (%s) seems to be ok' % i.url)\n break\n\n except Exception as err:\n print('[-] Error ! 
%s' % err)\n traceback.print_exc()\n pass\n return time.time()", "def query(self, query):\n queryFile = self.__cacheLocation + \"/\" + query + \".json\"\n if os.path.isfile(queryFile):\n reply = json.load(open(queryFile, \"r\"))\n return reply\n reply = self.__fmqlIF.query(query)\n jreply = json.loads(reply)\n jcache = open(self.__cacheLocation + \"/\" + query + \".json\", \"w\")\n json.dump(jreply, jcache)\n jcache.close()\n # logging.info(\"Cached \" + query)\n return jreply", "def finalize_result(self):\n logging.debug(\"finalize_result()\")\n with open(self.html_file, \"a\") as result_file:\n result_file.write(\"<br/>Analyzis successful\")\n with open(self.txt_file, \"a\") as result_file:\n result_file.write(\"Analyzis successful\")", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('Downloading data set from DC Open data')\n\n with open(input_filepath, 'r') as f:\n parking_violations = json.load(f)\n\n for fullname, csv in parking_violations.items():\n download_file = csv + '.csv'\n local_filename = '_'.join(name.lower() for name in fullname.split() ) + '.csv'\n local_filename = os.path.join(output_filepath, local_filename)\n if not os.path.isfile(local_filename):\n time.sleep(5)\n r = requests.get(download_file)\n if not b'\"status\":\"Processing\",\"generating\":{}' in r.content:\n with open(local_filename, 'wb') as f:\n f.write(r.content)\n logger.info(local_filename)\n else:\n logger.warning('Cannot download {0}'.format(local_filename))" ]
[ "0.6289538", "0.6239065", "0.6121396", "0.60783744", "0.6044701", "0.6038592", "0.6030916", "0.6013815", "0.5947279", "0.5914782", "0.5909344", "0.58337414", "0.5818949", "0.5799132", "0.57944274", "0.5789064", "0.5784416", "0.5783246", "0.5760673", "0.5746683", "0.570731", "0.5700574", "0.56853676", "0.56744534", "0.5669756", "0.56663793", "0.56407464", "0.5639775", "0.56379914", "0.5624713", "0.56165963", "0.5604409", "0.5600455", "0.5593709", "0.55830646", "0.55780816", "0.5568276", "0.5567676", "0.5560305", "0.55417675", "0.55381507", "0.553332", "0.552199", "0.5521038", "0.55090517", "0.55085504", "0.5503403", "0.54944354", "0.5472528", "0.5471935", "0.5467894", "0.54583716", "0.5458151", "0.5457766", "0.54562384", "0.54524964", "0.5442132", "0.54399025", "0.54331625", "0.54298365", "0.54296607", "0.5419649", "0.5415491", "0.5406708", "0.54039454", "0.5402674", "0.53860253", "0.53832906", "0.5380118", "0.5374131", "0.5372619", "0.53695446", "0.5367871", "0.5363848", "0.53622234", "0.53525907", "0.53512925", "0.53509486", "0.5349913", "0.5341992", "0.53400874", "0.5335964", "0.53292483", "0.5327523", "0.5325524", "0.5321", "0.5317752", "0.5316637", "0.5315506", "0.53102565", "0.53047895", "0.5301065", "0.5291866", "0.5291261", "0.52901214", "0.52885157", "0.52862686", "0.5282789", "0.5281774", "0.52808464", "0.5280371" ]
0.0
-1
This function comes from the test_mechanism.py file. We only use the part that generates the URL. The other parts call tests that fail because we do not have the whole navitia instance running. Thus, we do not need the "self" parameter, and response_checker is set to None. We have also added parts of other functions into it. Therefore, we only need to call journey, and all the tests are run from inside it.
def journey(
    self,
    _from,
    to,
    datetime,
    datetime_represents="departure",
    first_section_mode=[],
    last_section_mode=[],
    forbidden_uris=[],
    direct_path_mode=[],
    **kwargs
):
    # Creating the URL with all the parameters for the query
    assert datetime
    query = "from={real_from}&to={real_to}&datetime={date}&datetime_represents={represent}".format(
        date=datetime, represent=datetime_represents, real_from=_from, real_to=to
    )
    for mode in first_section_mode:
        query = "{query}&first_section_mode[]={mode}".format(query=query, mode=mode)
    for mode in last_section_mode:
        query = "{query}&last_section_mode[]={mode}".format(query=query, mode=mode)
    for mode in direct_path_mode:
        query = "{query}&direct_path_mode[]={mode}".format(query=query, mode=mode)
    for uri in forbidden_uris:
        query = "{query}&forbidden_uris[]={uri}".format(query=query, uri=uri)
    for k, v in six.iteritems(kwargs):
        query = "{query}&{k}={v}".format(query=query, k=k, v=v)

    # Override scenario
    if self.__class__.data_sets[0].scenario in [
        "distributed",
        "experimental",
        "asgard",
    ]:
        overridden_scenario = "distributed"
    else:
        overridden_scenario = "new_default"
    query = "{query}&_override_scenario={scenario}".format(
        query=query, scenario=overridden_scenario
    )

    # launching request and comparing
    self.request_compare("journeys?" + query)
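A minimal usage sketch (not part of the original record): the stop ids and datetime below are made-up placeholders, and the surrounding test class is assumed to provide the journey helper above together with request_compare.

# Illustrative only: placeholder stop ids and datetime, assuming a test class
# that mixes in the journey helper above.
self.journey(
    _from="stop_area:A",
    to="stop_area:B",
    datetime="20200101T080000",
    forbidden_uris=["line:1"],
)
# With a "new_default" scenario this ends up comparing the response of roughly:
# journeys?from=stop_area:A&to=stop_area:B&datetime=20200101T080000
#   &datetime_represents=departure&forbidden_uris[]=line:1&_override_scenario=new_default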
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_tour_complete_url_redirect(self):\n # complete tour and try to go to first step\n self.tour1.load_tour_class().add_user(self.test_user)\n mock_request = Mock(user=self.test_user, path='mock1', method='get', GET={})\n mock_view = MockView(request=mock_request)\n response = mock_view.dispatch(mock_request)\n self.assertEqual(302, response.status_code)\n self.assertEqual('mock_complete1', response.url)", "def test_urls(self):\n self.base_check_request(\"get\", \"/\")\n self.base_check_request(\"get\", \"apartments/\")\n self.base_check_request(\"get\", \"complexes/\")\n self.base_check_request(\"get\", \"locations/\")\n self.base_check_request(\"get\", \"companies/\")\n self.base_check_request(\"get\", \"companies-types/\")\n\n self.base_check_request(\"get\", \"count/apartments/\")\n self.base_check_request(\"get\", \"count/complexes/\")\n\n self.base_check_request(\"get\", \"search-forms/apartments/\")\n self.base_check_request(\"get\", \"search-forms/complexes/\")\n self.base_check_request(\"get\", \"search-forms/main/\")\n\n self.base_check_request(\"get\", \"autocomplete/companies/\")\n self.base_check_request(\"get\", \"autocomplete/complexes/\")\n self.base_check_request(\"get\", \"autocomplete/locations/\")\n\n self.base_check_request(\"get\", \"apartments_for_maps/?count=1&fields=lat,lon\")\n # self.base_check_request(\"get\", \"reserve/\")\n # self.base_check_request(\"get\", \"complain/\")\n # self.base_check_request(\"post\", \"apartment-complain/\")\n # self.base_check_request(\"post\", \"order-apartment/\")", "def setUp(self):\n self.reactor = self.Reactor()\n self.url = 'https://www.example.com/someresource.html#andatag'", "def setUp(self):\n\n # Json response\n self.json_pass_times = {\n \"message\": \"success\",\n \"request\": {\n \"altitude\": 100,\n \"datetime\": 1481418788,\n \"latitude\": 15.0,\n \"longitude\": 20.0,\n \"passes\": 5\n },\n \"response\": [\n {\n \"duration\": 348,\n \"risetime\": 1481448840\n },\n {\n \"duration\": 634,\n \"risetime\": 1481454465\n },\n {\n \"duration\": 220,\n \"risetime\": 1481460482\n },\n {\n \"duration\": 224,\n \"risetime\": 1481484335\n },\n {\n \"duration\": 640,\n \"risetime\": 1481489937\n }\n ]\n }\n\n self.location = self.json_pass_times['response']\n\n #HTTP Mock\n @all_requests\n def correct_response(url, request):\n headers = {'content-type': 'application/json',\n 'Set-Cookie': 'foo=bar;'}\n return response(200, self.json_pass_times, headers, None, 5,\n request)\n self.http_correct = correct_response\n\n @all_requests\n def wrong_response(url, request):\n headers = {'content-type': 'application/json',\n 'Set-Cookie': 'foo=bar;'}\n return response(403, self.json_pass_times, headers, None, 5,\n request)\n self.http_wrong = wrong_response\n\n self.iss = pyiss.ISS()", "def setUp(self):\n self.response = self.s.get(self.url, params=self.params)", "def setUp(self):\n self.anime_link = \"https://animepertutti.com/sword-art-online-alicization-war-of-underworld-sub-ita-streaming-download-z\" #noqa", "def test_analytics_proxy_url(self, act):\r\n act.return_value = self.FakeProxyResponse()\r\n\r\n url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {\r\n 'aname': 'ProblemGradeDistribution'\r\n })\r\n self.assertEqual(response.status_code, 200)\r\n\r\n # check request url\r\n expected_url = \"{url}get?aname={aname}&course_id={course_id!s}&apikey={api_key}\".format(\r\n url=\"http://robotanalyticsserver.netbot:900/\",\r\n 
aname=\"ProblemGradeDistribution\",\r\n course_id=self.course.id.to_deprecated_string(),\r\n api_key=\"robot_api_key\",\r\n )\r\n act.assert_called_once_with(expected_url)", "def setUp(self):\n self.original_url = \"https://ultimaker.com/en/knowledge/33-reducing-costs-and-improving-efficiency-with-the-ultimaker-s5\"\n self.short_code = \"asv12Hb91c\"\n self.url_mapping = URLMapping(short_code=self.short_code, original_url=self.original_url)", "def test_get_dealer_landing_page(self):\n pass", "def test_get_next_url_none(self):\n self.assertIsNone(Tour.objects.get_next_url(self.test_user))", "def test_make_pathways(self):\n basic_test_runner(self, 'pathways')", "def test_get_goal(self):\n pass", "def test_home_by_Next_tram(self):\r\n result = self.app.get('/prochain/1/ANTIGONE/MOSSON')\r\n self.assertTrue(b'LIGNE' in result.data)\r\n self.assertTrue(b'ARRET' in result.data)\r\n self.assertTrue(b'DESTINATION' in result.data)", "def test_get_next_url_current_tour(self, mock_get_for_user, mock_step1_is_complete):\n mock_get_for_user.return_value = self.tour1\n mock_step1_is_complete.return_value = False\n\n self.tour1.steps.add(self.step1)\n\n # add user to tour\n self.tour1.load_tour_class().add_user(self.test_user)\n self.tour1.load_tour_class().add_user(self.test_user2)\n self.tour2.load_tour_class().add_user(self.test_user)\n self.tour2.load_tour_class().add_user(self.test_user2)\n\n self.assertEqual(self.step1.url, Tour.objects.get_next_url(self.test_user))\n self.assertEqual(1, mock_get_for_user.call_count)", "def test_request_unrelated_page(self, mock_step1_is_complete):\n mock_step1_is_complete.return_value = False\n\n self.tour1.load_tour_class().add_user(self.test_user)\n\n # request page that isn't in the tour before tour is complete\n mock_request = Mock(user=self.test_user, path='mock-fake', method='get', GET={})\n mock_view = MockView(request=mock_request)\n response = mock_view.dispatch(mock_request)\n self.assertEqual(200, response.status_code)", "def test_redirection(self):\n self.assertRedirects(self.response, self.home_url)", "def test_request_first_step(self, mock_step1_is_complete):\n mock_step1_is_complete.return_value = False\n\n self.tour1.load_tour_class().add_user(self.test_user)\n mock_request = Mock(user=self.test_user, path='mock1', method='get', GET={})\n mock_view = MockView(request=mock_request)\n response = mock_view.dispatch(mock_request)\n self.assertEqual(200, response.status_code)", "def test_redirect_from_future_step(self, mock_step1_is_complete):\n mock_step1_is_complete.return_value = False\n\n # do request to second step when we should be on first\n self.tour1.load_tour_class().add_user(self.test_user)\n mock_request = Mock(user=self.test_user, path='mock2', method='get', GET={})\n mock_view = MockView(request=mock_request)\n response = mock_view.dispatch(mock_request)\n self.assertEqual(302, response.status_code)\n self.assertEqual('mock1', response.url)", "def __init__(self, start_url, goal_url):\n super(Problem, self).__init__()\n self.start_url = start_url\n self.goal_url = goal_url\n self.scraper = Scraper()\n self.base_url = 'https://fr.wikipedia.org'", "def setUp(self):\n url = reverse('signup')\n self.response = self.client.get(url)", "def _api_call(self, url, response_checker):\n self.request_compare(url)", "def setUp(self):\n self.response = self.client.get('/')", "def test_tour_complete_unrelated_page(self):\n # request page that isn't in the tour when tour is complete\n self.tour1.load_tour_class().add_user(self.test_user)\n mock_request = 
Mock(user=self.test_user, path='mock-fake', method='get', GET={})\n mock_view = MockView(request=mock_request)\n response = mock_view.dispatch(mock_request)\n self.assertEqual(200, response.status_code)", "def forward_test(self, *args, **kwargs):\n pass", "def test_init(self):\n self.assertEqual(self.request_mgr.f2f_search_url,\n \"https://www.food2fork.com/api/search\")\n self.assertEqual(self.request_mgr.f2f_detail_url,\n \"https://www.food2fork.com/api/get\")", "def test_get_proxied_url_2(self):\n test_urlpattern = URLPattern(url=\"platform.almanhal.com\")\n self.assertEqual(test_urlpattern.get_proxied_url, \"platform-almanhal-com\")", "async def test_url(\n self,\n resp: httpx.Response,\n data: Dict[str, Any],\n query: Dict[str, Any], # pylint: disable=unused-argument\n ):\n code: str = \"\"\n if resp.url.path == \"/void/callback\":\n code = resp.url.query.get(\"code\")\n if resp.url.path == \"/static/404.html\":\n code = URL(str(resp.history[-1].url)).query.get(\"code\")\n if code:\n username = data.get(\"identity\")\n self._callback_url = self.init_query.get(\"callback_url\")\n self.waf_retry = 0\n _LOGGER.debug(\"Success! Oauth code %s for %s captured.\", code, username)\n await self.session.aclose()\n # 302 redirect\n return URL(self._callback_url).update_query(\n {\"code\": code, \"username\": username, \"domain\": self._host_url.host}\n )\n if get_content_type(resp) == \"text/html\":\n text = resp.text\n if \"<noscript>Please enable JavaScript to view the page content.\" in text:\n _LOGGER.debug(\"WAF discovered %s times in a row.\", self.waf_retry)\n self.waf_retry += 1\n return return_timer_countdown_refresh_html(\n max(30 * (self.waf_retry - self.waf_limit), 120)\n if self.waf_retry > self.waf_limit\n else random.random() * self.waf_retry + 10,\n f\"Detected Tesla web application firewall block #{self.waf_retry}. 
\"\n f\"Please wait and then reload the page or wait for the auto reload.\",\n False,\n )\n self.waf_retry = 0\n if get_content_type(resp) == \"application/json\":\n text = orjson.loads(resp.text) # pylint: disable=no-member\n _LOGGER.debug(\"Json response: %s\", text)", "def setUp(self):\n self.url = \"https://www.loc.gov/item/mss859430021?fo=json\"", "def setUp(self):\n self.schedule_route_url = api_reverse('route:schedule-route')\n self.login_url = api_reverse('authentication:user-login')\n self.join_route_url = api_reverse('route:join-route')\n self.retrieve_route_url = api_reverse('route:retrieve-route')\n self.register_vehicle_url = api_reverse('vehicle:register-vehicle')\n\n\n\n\n self.user_one = User.objects.create_user(\n first_name='jane1',\n last_name='Doe1',\n surname='jDoe1',\n email='jane1@doe.com',\n password='janeDoe@123',\n id_number=1223,\n phone_number=\"+254712534545\",\n is_active=True)\n\n self.user_two = User.objects.create_user(\n first_name='rose',\n last_name='mary',\n surname='mary',\n email='mary@mary.com',\n username=\"rosemary\",\n password='janeDoe@123',\n id_number=122843,\n phone_number=\"+2547129743545\",\n is_active=True)\n\n self.user_three = User.objects.create_user(\n first_name='Three',\n last_name='Mine',\n surname='James',\n email='user@three.com',\n username=\"Three\",\n password='janeDoe@123',\n id_number=1228444,\n phone_number=\"+2547179743545\",\n is_active=True)\n\n self.valid_route_details = {\n \"destination\": {\"latitude\": 37.0625,\"longitude\": -95.677068},\n \"starting_point\": {\"latitude\": 37.0625,\"longitude\": -95.677068},\n \"commuting_time\": \"17:00\"\n }\n self.valid_route_two_details = {\n \"destination\": {\"latitude\": 31.0625,\"longitude\": -95.677068},\n \"starting_point\": {\"latitude\": 31.0625,\"longitude\": -95.677068},\n \"commuting_time\": \"17:00\"\n }\n\n self.valid_user_login_details = {\n 'email': 'jane1@doe.com',\n 'password': 'janeDoe@123',\n }\n self.valid_user_two_login_details = {\n 'email': 'mary@mary.com',\n 'password': 'janeDoe@123',\n }\n self.valid_user_three_login_details = {\n 'email': 'user@three.com',\n 'password': 'janeDoe@123',\n }\n self.token = self.login_user().data['token']\n self.token_two = self.login_user_two().data['token']\n self.token_three = self.login_user_three().data['token']\n self.route ={\n 'route':self.get_route_object().id\n }\n\n self.valid_vehicle_details = {\n \"registration_number\": \"KAC236Q\",\n \"capacity\": \"5\"\n }\n vehicle = self.register_vehicle()\n self.vehicle_id = {\n 'vehicle': vehicle.id\n }", "def run(self):\r\n logging.info(\"Now excecuting test step {}\".format(self.stepname))\r\n try:\r\n response = eval(\"requests.{}('{}',params={})\".format(self.verb, self.url, self.payload))\r\n return response, True\r\n\r\n except requests.exceptions.RequestException as e:\r\n logging.warn(\"test {} failed\".format(self.stepname))\r\n \r\n return None, False", "def test_about_view(self):\n print 'Running %s ...' 
% getName()\n# test that URL resolves to correct views function \n found = resolve('/sequencelistings/about/')\n self.assertEqual(found.func, views.about)\n \n self.sequenceListingFixture.create_sequence_instance(self.sequenceListing)\n \n response = self.client.get(reverse('sequencelistings:about'))\n self.assertEqual(response.status_code, 200)\n \n# test that the page returns expected html contents\n self.assertContains(response, 'About')\n self.assertContains(response, 'only for information purposes')", "def test_get_next_url_no_user(self):\n self.assertIsNone(Tour.objects.get_next_url(User()))", "def _make_url(self):\n ...", "def setUp(self):\n self.tool = flow_common_tool()\n self.xml = xml_tool()\n self.ins = route()\n\n self.response = {}\n self.response[\"HA_SINGLE_INSTANCE\"] = \"\"\"\n <instance-information xmlns=\"http://xml.juniper.net/junos/18.1I0/junos-routing\" junos:style=\"terse\">\n <instance-core>\n <instance-name>master</instance-name>\n <instance-type>forwarding</instance-type>\n <instance-rib>\n <irib-name>inet.0</irib-name>\n <irib-active-count>22</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.0</irib-name>\n <irib-active-count>7</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n </instance-information>\n \"\"\"\n\n self.response[\"HA_MULTI_INSTANCE\"] = \"\"\"\n <instance-information xmlns=\"http://xml.juniper.net/junos/18.1I0/junos-routing\" junos:style=\"terse\">\n <instance-core>\n <instance-name>master</instance-name>\n <instance-type>forwarding</instance-type>\n <instance-rib>\n <irib-name>inet.0</irib-name>\n <irib-active-count>22</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.0</irib-name>\n <irib-active-count>7</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n <instance-core>\n <instance-name>__juniper_private1__</instance-name>\n <instance-type>forwarding</instance-type>\n <instance-rib>\n <irib-name>__juniper_private1__.inet.0</irib-name>\n <irib-active-count>12</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n <instance-core>\n <instance-name>__juniper_private2__</instance-name>\n <instance-type>forwarding</instance-type>\n <instance-rib>\n <irib-name>__juniper_private2__.inet.0</irib-name>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>1</irib-hidden-count>\n </instance-rib>\n </instance-core>\n <instance-core>\n <instance-name>__juniper_private3__</instance-name>\n <instance-type>forwarding</instance-type>\n </instance-core>\n <instance-core>\n <instance-name>__juniper_private4__</instance-name>\n <instance-type>forwarding</instance-type>\n <instance-rib>\n <irib-name>__juniper_private4__.inet.0</irib-name>\n <irib-active-count>2</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n <instance-core>\n <instance-name>__master.anon__</instance-name>\n <instance-type>forwarding</instance-type>\n </instance-core>\n <instance-core>\n <instance-name>mgmt_junos</instance-name>\n 
<instance-type>forwarding</instance-type>\n </instance-core>\n </instance-information>\n \"\"\"\n\n\n self.response[\"HA_SINGLE_INSTANCE_BRIEF\"] = \"\"\"\n <instance-information xmlns=\"http://xml.juniper.net/junos/18.1I0/junos-routing\" junos:style=\"terse\">\n <instance-core>\n <instance-name>master</instance-name>\n <instance-type>forwarding</instance-type>\n <instance-rib>\n <irib-name>inet.0</irib-name>\n <irib-active-count>18</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.0</irib-name>\n <irib-active-count>1</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n </instance-information>\n \"\"\"\n\n self.response[\"HA_SINGLE_INSTANCE_DETAIL\"] = \"\"\"\n <instance-information xmlns=\"http://xml.juniper.net/junos/18.1I0/junos-routing\" junos:style=\"detail\">\n <instance-core>\n <instance-name>master</instance-name>\n <router-id>10.208.133.147</router-id>\n <instance-type>forwarding</instance-type>\n <instance-state>Active</instance-state>\n <instance-rib>\n <irib-name>inet.0</irib-name>\n <irib-route-count>18</irib-route-count>\n <irib-active-count>18</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.0</irib-name>\n <irib-route-count>1</irib-route-count>\n <irib-active-count>1</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n </instance-information>\n \"\"\"\n\n self.response[\"HA_SINGLE_INSTANCE_EXTENSIVE\"] = \"\"\"\n <instance-information xmlns=\"http://xml.juniper.net/junos/18.1I0/junos-routing\" junos:style=\"detail\">\n <instance-core>\n <instance-name>master</instance-name>\n <router-id>10.208.133.147</router-id>\n <instance-type>forwarding</instance-type>\n <instance-state>Active</instance-state>\n <instance-rib>\n <irib-name>inet.0</irib-name>\n <irib-route-count>20</irib-route-count>\n <irib-active-count>20</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet.1</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet.2</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet.3</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>iso.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>mpls.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n 
<irib-name>__mpls-oam__.mpls.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.0</irib-name>\n <irib-route-count>5</irib-route-count>\n <irib-active-count>5</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.1</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.2</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.3</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>l2circuit.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>mdt.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>l2protection.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>lsdist.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>lsdist.1</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inetcolor.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6color.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n </instance-information>\n \"\"\"\n\n self.response[\"HA_SINGLE_INSTANCE_SUMMARY\"] = \"\"\"\n <instance-information xmlns=\"http://xml.juniper.net/junos/18.1I0/junos-routing\" junos:style=\"terse\">\n <instance-core>\n <instance-name>master</instance-name>\n <instance-type>forwarding</instance-type>\n <instance-rib>\n <irib-name>inet.0</irib-name>\n <irib-active-count>22</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.0</irib-name>\n <irib-active-count>5</irib-active-count>\n 
<irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n </instance-information>\n \"\"\"\n\n self.response[\"SA_INSTANCE_TEXT\"] = \"\"\"\nInstance Type\n Primary RIB Active/holddown/hidden\nmaster forwarding\n inet.0 18/0/0\n\n__juniper_private1__ forwarding\n __juniper_private1__.inet.0 6/0/0\n\n__juniper_private2__ forwarding\n __juniper_private2__.inet.0 0/0/1\n\n__juniper_private3__ forwarding\n\n__juniper_private4__ forwarding\n __juniper_private4__.inet.0 2/0/0\n\n__master.anon__ forwarding\n \"\"\"", "def configure(self):\n\n '''The method makes a test to get the site info'''\n domain = 'http://localhost:8888'\n webservice_url = '/webservice/rest/server.php?'\n parameters = {\n 'wstoken': self.token,\n 'wsfunction': 'core_webservice_get_site_info',\n 'moodlewsrestformat': 'json'\n }\n request = requests.get(url=domain + webservice_url, params=parameters)\n request = request.json()\n\n if 'exception' in request:\n if request['exception'] == \"moodle_exception\":\n if request['errorcode'] == 'invalidtoken':\n return self._reopen_form()", "def setUp(self):\r\n super(OpenResponseTest, self).setUp()\r\n\r\n # Create page objects\r\n self.auth_page = AutoAuthPage(self.browser, course_id=self.course_id)\r\n self.course_info_page = CourseInfoPage(self.browser, self.course_id)\r\n self.tab_nav = TabNavPage(self.browser)\r\n self.course_nav = CourseNavPage(self.browser)\r\n self.open_response = OpenResponsePage(self.browser)\r\n self.peer_grade = PeerGradePage(self.browser)\r\n self.peer_calibrate = PeerCalibratePage(self.browser)\r\n self.peer_confirm = PeerConfirmPage(self.browser)\r\n self.progress_page = ProgressPage(self.browser, self.course_id)\r\n\r\n # Configure the test course\r\n course_fix = CourseFixture(\r\n self.course_info['org'], self.course_info['number'],\r\n self.course_info['run'], self.course_info['display_name']\r\n )\r\n\r\n # Create a unique name for the peer assessed problem. 
This will show up\r\n # in the list of peer problems, which is shared among tests running\r\n # in parallel; it needs to be unique so we can find it.\r\n # It's also import that the problem has \"Peer\" in the name; otherwise,\r\n # the ORA stub will ignore it.\r\n self.peer_problem_name = \"Peer-Assessed {}\".format(self.unique_id[0:6])\r\n\r\n course_fix.add_children(\r\n XBlockFixtureDesc('chapter', 'Test Section').add_children(\r\n XBlockFixtureDesc('sequential', 'Test Subsection').add_children(\r\n\r\n XBlockFixtureDesc('combinedopenended', 'Self-Assessed',\r\n data=load_data_str('ora_self_problem.xml'), metadata={'graded': True}),\r\n\r\n XBlockFixtureDesc('combinedopenended', 'AI-Assessed',\r\n data=load_data_str('ora_ai_problem.xml'), metadata={'graded': True}),\r\n\r\n XBlockFixtureDesc('combinedopenended', self.peer_problem_name,\r\n data=load_data_str('ora_peer_problem.xml'), metadata={'graded': True}),\r\n\r\n # This is the interface a student can use to grade his/her peers\r\n XBlockFixtureDesc('peergrading', 'Peer Module'),\r\n\r\n ))).install()\r\n\r\n # Configure the XQueue stub's response for the text we will submit\r\n # The submission text is unique so we can associate each response with a particular test case.\r\n self.submission = \"Test submission \" + self.unique_id[0:4]\r\n if self.XQUEUE_GRADE_RESPONSE is not None:\r\n XQueueResponseFixture(self.submission, self.XQUEUE_GRADE_RESPONSE).install()\r\n\r\n # Log in and navigate to the essay problems\r\n self.auth_page.visit()\r\n self.course_info_page.visit()\r\n self.tab_nav.go_to_tab('Courseware')", "def test_init(self):\n assert self.route.route[\"transform\"] == \"transform\"\n assert self.route.route[\"output\"] == \"output\"\n assert \"api\" not in self.route.route", "def test_valid_flow__registration(self):\n\n test_env = {\n \"testapp_authority\": self.testapp_authority,\n \"testapp_app\": self.testapp_app,\n \"extra_environ_app\": {\n \"wsgi.url_scheme\": \"https\",\n \"HTTP_HOST\": \"app.example.com\",\n },\n \"extra_environ_authority\": {\n \"wsgi.url_scheme\": \"https\",\n \"HTTP_HOST\": \"authority.example.com\",\n },\n \"requests_session_app\": requests.Session(),\n \"requests_session_authority\": requests.Session(),\n }\n\n def callback__request_token(req, test_env=test_env):\n \"\"\"/authority/oauth1/request_token is visited by the Server\n\n py3 needs the 'unicode' wrapper to decode the bystring\n \"\"\"\n assert \"Authorization\" in req.headers\n assert req.headers[\"Authorization\"].decode(\"utf-8\").startswith(\"OAuth \")\n assert \"User-Agent\" in req.headers\n assert req.headers[\"User-Agent\"].decode(\"utf-8\") == \"CustomApiClient v0\"\n assert req.url == oauth1_utils.CustomApiClient.OAUTH1_SERVER_REQUEST_TOKEN\n\n # request as SERVER, no cookies\n with IsolatedTestapp(test_env[\"testapp_authority\"]) as testapp:\n res = testapp.get(\n \"/authority/oauth1/request_token\",\n headers=req.headers,\n extra_environ=test_env[\"extra_environ_authority\"],\n status=200,\n )\n\n # status is '200 OK'\n # return in a format tailored for `requests`\n return (int(res.status.split(\" \")[0]), res.headers, res.body)\n\n def callback__authenticate_get(req, test_env=test_env):\n \"\"\"/authority/oauth1/authorize is visited by the USER\"\"\"\n assert req.url.startswith(OAUTH1__URL_AUTHORITY_AUTHENTICATE)\n qs = req.url.split(\"?\")[1]\n qs = dict(parse_qsl(qs))\n\n testapp = test_env[\"testapp_authority\"]\n res = testapp.get(\n \"/authority/oauth1/authorize?oauth_token=%s\" % qs[\"oauth_token\"],\n 
headers=req.headers,\n extra_environ=test_env[\"extra_environ_authority\"],\n status=200,\n )\n test_env[\"requests_session_authority\"].cookies.update(\n testapp.cookies\n ) # update the session with the cookies from the response\n\n # status is '200 OK'\n # return in a format tailored for `requests`\n return (int(res.status.split(\" \")[0]), res.headers, res.body)\n\n def callback__authenticate_post(req, test_env=test_env):\n \"\"\"/authority/oauth1/authorize is visited by the USER\"\"\"\n assert req.url.startswith(OAUTH1__URL_AUTHORITY_AUTHENTICATE)\n payload = dict(parse_qsl(req.body))\n\n testapp = test_env[\"testapp_authority\"]\n res = testapp.post(\n \"/authority/oauth1/authorize\",\n payload,\n headers=req.headers,\n extra_environ=test_env[\"extra_environ_authority\"],\n status=302,\n )\n test_env[\"requests_session_authority\"].cookies.update(\n testapp.cookies\n ) # update the session with the cookies from the response\n\n # status is '200 OK'\n # return in a format tailored for `requests`\n return (int(res.status.split(\" \")[0]), res.headers, res.body)\n\n def callback__callback(req, test_env=test_env):\n \"\"\"/application/flow-register/authorized-callback is visited by the USER\"\"\"\n _path, _qs = req.url.split(\"?\")\n\n testapp = test_env[\"testapp_app\"]\n res = testapp.get(\n \"/application/flow-register/authorized-callback?%s\" % _qs,\n headers=req.headers,\n extra_environ=test_env[\"extra_environ_app\"],\n status=303,\n )\n test_env[\"requests_session_app\"].cookies.update(\n testapp.cookies\n ) # update the session with the cookies from the response\n\n # status is '303 See Other'\n # return in a format tailored for `requests`\n return (int(res.status.split(\" \")[0]), res.headers, res.body)\n\n def callback__access_token(req, test_env=test_env):\n \"\"\"/authority/oauth1/access_token is visited by the Server\"\"\"\n assert \"Authorization\" in req.headers\n assert req.headers[\"Authorization\"].decode(\"utf-8\").startswith(\"OAuth \")\n assert \"User-Agent\" in req.headers\n assert req.headers[\"User-Agent\"].decode(\"utf-8\") == \"CustomApiClient v0\"\n assert req.url == oauth1_utils.CustomApiClient.OAUTH1_SERVER_ACCESS_TOKEN\n\n # request as SERVER, no cookies\n with IsolatedTestapp(test_env[\"testapp_authority\"]) as testapp:\n _headers = string_headers(\n req.headers\n ) # these can end up being unicode in tests\n res = testapp.get(\n \"/authority/oauth1/access_token\",\n headers=_headers,\n extra_environ=test_env[\"extra_environ_authority\"],\n status=200,\n )\n\n # status is '200 OK'\n # return in a format tailored for `requests`\n return (int(res.status.split(\" \")[0]), res.headers, res.body)\n\n def callback__callback_success(req, test_env=test_env):\n \"\"\"/application/flow-register/authorized-callback-success is visited by the USER\"\"\"\n (_path, _qs) = parse_request_simple(req)\n\n testapp = test_env[\"testapp_application\"]\n _headers = string_headers(\n req.headers\n ) # these can end up being unicode in tests\n res = testapp.get(\n \"/application/flow-register/authorized-callback-success?%s\" % _qs,\n headers=_headers,\n extra_environ=test_env[\"extra_environ_app\"],\n status=200,\n )\n test_env[\"requests_session_application\"].cookies.update(\n testapp.cookies\n ) # update the session with the cookies from the response\n\n # status is '200 OK'\n # return in a format tailored for `requests`\n return (int(res.status.split(\" \")[0]), res.headers, res.body)\n\n with responses.RequestsMock() as rsps:\n rsps.add_callback(\n responses.GET,\n 
oauth1_utils.CustomApiClient.OAUTH1_SERVER_REQUEST_TOKEN, # /authority/oauth1/request_token\n callback=callback__request_token,\n )\n rsps.add_callback(\n responses.GET,\n oauth1_utils.CustomApiClient.OAUTH1_SERVER_ACCESS_TOKEN, # /authority/oauth1/access_token\n callback=callback__access_token,\n )\n\n # the following were originally handled via `requests.get` but migrated to direct webtest queries\n #\n # rsps.add_callback(\n # responses.GET, OAUTH1__URL_AUTHORITY_AUTHENTICATE, # /authority/oauth1/authorize\n # callback=callback__authenticate_get,\n # )\n # rsps.add_callback(\n # responses.POST, OAUTH1__URL_AUTHORITY_AUTHENTICATE, # /authority/oauth1/authorize\n # callback=callback__authenticate_post,\n # )\n # rsps.add_callback(\n # responses.GET, oauth1_model.OAUTH1__URL_APP_FLOW_REGISTER_CALLBACK, # https://example.com/application/flow-register/authorized-callback\n # callback=callback__callback,\n # )\n # rsps.add_callback(\n # responses.GET, oauth1_model.OAUTH1__URL_APP_FLOW_REGISTER_CALLBACK_SUCCESS, # https://example.com/application/flow-register/authorized-callback-success\n # callback=callback__callback_success,\n # )\n\n #\n # actual test flow...\n #\n\n # first we need to log into the oAuth1 Authority\n # the authority is the account which will be the oAuth identity provider (e.g. Twitter)\n\n # User visit\n res = self.testapp_authority.get(\n \"/authority/account/login-form\",\n extra_environ=test_env[\"extra_environ_authority\"],\n status=200,\n )\n assert res.text == \"authority|login-form\"\n test_env[\"requests_session_authority\"].cookies.update(\n self.testapp_authority.cookies\n ) # update the session with the cookies from the response\n\n # User visit\n res = self.testapp_authority.get(\n \"/authority/account/login-submit\",\n extra_environ=test_env[\"extra_environ_authority\"],\n status=303,\n )\n test_env[\"requests_session_authority\"].cookies.update(\n self.testapp_authority.cookies\n ) # update the session with the cookies from the response\n assert (\n res.text\n == \"\"\"303 See Other\\n\\nThe resource has been moved to /authority/account/home; you should be redirected automatically.\\n\\n\"\"\"\n )\n\n # User visit\n res = self.testapp_authority.get(\n \"/authority/account/home\",\n extra_environ=test_env[\"extra_environ_authority\"],\n status=200,\n )\n test_env[\"requests_session_authority\"].cookies.update(\n self.testapp_authority.cookies\n ) # update the session with the cookies from the response\n assert (\n res.text\n == \"authority|home|user=%s\" % oauth1_model.USERID_ACTIVE__AUTHORITY\n )\n\n #\n # now we want to visit the application\n #\n\n # User visit's the application\n #\n res = self.testapp_app.get(\n \"/application/flow-register\",\n extra_environ=test_env[\"extra_environ_app\"],\n status=303,\n )\n test_env[\"requests_session_app\"].cookies.update(\n self.testapp_app.cookies\n ) # update the session with the cookies from the response\n assert (\n res.text\n == \"\"\"303 See Other\\n\\nThe resource has been moved to /application/flow-register/oauth1/start; you should be redirected automatically.\\n\\n\"\"\"\n )\n\n # User visit\n # however, it makes a behind the scenes visit to\n # * /authority/oauth1/request_token\n res = self.testapp_app.get(\n \"/application/flow-register/oauth1/start\",\n extra_environ=test_env[\"extra_environ_app\"],\n status=303,\n )\n test_env[\"requests_session_app\"].cookies.update(\n self.testapp_app.cookies\n ) # update the session with the cookies from the response\n assert \"Location\" in res.headers\n url_auth = 
res.headers[\"Location\"]\n assert res.headers[\"Location\"].startswith(\n OAUTH1__URL_AUTHORITY_AUTHENTICATE\n )\n\n # resAuthInbound = test_env['requests_session_authority'].get(url_auth)\n # then the user is redirected to the authority to approve\n qs = url_auth.split(\"?\")[1]\n url_auth_local = \"/authority/oauth1/authorize?%s\" % qs\n resAuthInbound = self.testapp_authority.get(\n url_auth_local, extra_environ=test_env[\"extra_environ_authority\"]\n )\n assert (\n '<form action=\"/authority/oauth1/authorize\" method=\"POST\" id=\"app-action-authorize\">'\n in resAuthInbound.text\n )\n csrfs = re_csrf.findall(resAuthInbound.text)\n assert len(csrfs) == 2 # submit, deny\n tokens = re_token.findall(resAuthInbound.text)\n assert len(tokens) == 2 # submit, deny\n\n payload = {\n \"csrf_\": csrfs[0],\n \"oauth_token\": tokens[0],\n \"submit\": \"authorize\",\n }\n # payload = {'csrf_': csrfs[0], 'oauth_token': tokens[0], 'submit': 'authorize', }\n\n # visited by USER: Authorize the application on the Authority\n resAuthApprove = self.testapp_authority.post(\n \"/authority/oauth1/authorize\",\n payload,\n extra_environ=test_env[\"extra_environ_authority\"],\n status=302,\n )\n test_env[\"requests_session_authority\"].cookies.update(\n self.testapp_authority.cookies\n ) # update the session with the cookies from the response\n\n # visited by USER: redirected to the callback page on the APPLICATION\n assert \"Location\" in resAuthApprove.headers\n url_callback = resAuthApprove.headers[\"Location\"]\n assert url_callback.startswith(OAUTH1__URL_APP_FLOW_REGISTER_CALLBACK)\n qs = url_callback.split(\"?\")[1]\n url_callback_local = (\n \"/application/flow-register/authorized-callback?%s\" % qs\n )\n resAuthCallback = self.testapp_app.get(\n url_callback_local,\n extra_environ=test_env[\"extra_environ_app\"],\n status=303,\n )\n\n # visited by USER: redirected to the callback-success page on the APPLICATION\n assert \"Location\" in resAuthCallback.headers\n url_callback_success = resAuthCallback.headers[\"Location\"]\n assert url_callback_success.startswith(\n OAUTH1__URL_APP_FLOW_REGISTER_CALLBACK_SUCCESS\n )\n assert len(url_callback_success.split(\"?\")) == 1\n url_callback_success_local = (\n \"/application/flow-register/authorized-callback-success\"\n )\n resAuthCallbackSuccess = self.testapp_app.get(\n url_callback_success_local,\n extra_environ=test_env[\"extra_environ_app\"],\n status=200,\n )\n assert (\n resAuthCallbackSuccess.text\n == \"application|register|authorized-callback-success|user=%s\"\n % oauth1_model.USERID_ACTIVE__APPLICATION\n )\n\n # ensure logout, just to be safe\n res = self.testapp_authority.get(\n \"/authority/account/logout\",\n extra_environ=test_env[\"extra_environ_authority\"],\n status=303,\n )\n assert (\n res.text\n == \"\"\"303 See Other\\n\\nThe resource has been moved to /authority/account/login-form; you should be redirected automatically.\\n\\n\"\"\"\n )\n\n res = self.testapp_authority.get(\n \"/authority/account/home\",\n extra_environ=test_env[\"extra_environ_authority\"],\n status=303,\n )\n assert (\n res.text\n == \"\"\"303 See Other\\n\\nThe resource has been moved to /authority/account/login-form; you should be redirected automatically.\\n\\n\"\"\"\n )", "def test_registred(\n self, mock_get_ai_details, mock_get_ai, mock_get_categories\n ):\n\n mock_get_ai.return_value = self.ai\n mock_get_ai_details.return_value = self.ai_details\n\n response = self.client.get(reverse(\n 'studio:edit_bot',\n kwargs={'aiid': self.ai['aiid']}\n ))\n\n 
self.assertEqual(response.status_code, 200)", "def tela_inicial_do_challenge_1():\r\n # primeiro\r\n _url_site = \"http://rpachallenge.com/\"\r\n _current_url = _browser.current_url\r\n\r\n assert _current_url == _url_site", "def test_get_next_url_recent_tour(self, mock_get_for_user, mock_get_recent_tour, mock_step1_is_complete):\n mock_get_for_user.return_value = None\n mock_get_recent_tour.return_value = self.tour2\n mock_step1_is_complete.return_value = False\n\n self.tour2.steps.add(self.step1)\n\n # add user to tour\n self.tour1.load_tour_class().add_user(self.test_user)\n self.tour1.load_tour_class().add_user(self.test_user2)\n self.tour2.load_tour_class().add_user(self.test_user)\n self.tour2.load_tour_class().add_user(self.test_user2)\n\n self.assertEqual(self.step1.url, Tour.objects.get_next_url(self.test_user))\n self.assertEqual(1, mock_get_for_user.call_count)\n self.assertEqual(1, mock_get_recent_tour.call_count)", "def __init__(self, domainurl, webretrievedelay, proxy, targettype,\n reportstringforresult, target, useragent, friendlyname, regex,\n fullurl, boutoutputrequested, importantproperty, params, headers, method, postdata, verbose):\n self._sourceurl = domainurl\n self._webretrievedelay = webretrievedelay\n self._proxy = proxy\n self._targetType = targettype\n self._reportstringforresult = reportstringforresult\n self._errormessage = \"[-] Cannot scrape\"\n self._usermessage = \"[*] Checking\"\n self._target = target\n self._userAgent = useragent\n self._friendlyName = friendlyname\n self._regex = \"\"\n self.RegEx = regex # call the helper method to clean %TARGET% from regex string\n self._fullURL = \"\"\n self.FullURL = fullurl # call the helper method to clean %TARGET% from fullurl string\n self._botOutputRequested = boutoutputrequested\n self._importantProperty = importantproperty\n self._params = None\n if params is not None:\n self.Params = params # call the helper method to clean %TARGET% from params string\n self._headers = None\n if headers is not None:\n self.Headers = headers # call the helper method to clean %TARGET% from params string\n self._postdata = None\n if postdata:\n self.PostData = postdata\n self._method = None\n self.Method = method # call the helper method to ensure result is either GET or POST\n self._results = []\n self._verbose = verbose", "def test_page_links(inspire_app):\n create_record(\"lit\", data={\"titles\": [{\"title\": \"Solenoid\"}]})\n create_record(\"lit\", data={\"titles\": [{\"title\": \"Solenoid\"}]})\n with inspire_app.test_client() as client:\n # Limit records\n response = client.get(\n \"/api/literature\", query_string=dict(size=1, page=1, q=\"Solenoid\")\n )\n response_json = response.json\n assert len(response_json[\"hits\"][\"hits\"]) == 1\n\n data = response_json[\"links\"]\n assert \"self\" in data\n assert \"next\" in data\n assert \"prev\" not in data\n\n # Assert next URL before calling it\n first_url = data[\"self\"]\n next_url = data[\"next\"]\n parsed_url = parse_url(next_url)\n assert parsed_url[\"qs\"][\"size\"] == [\"1\"]\n assert parsed_url[\"qs\"][\"page\"] == [\"2\"]\n\n # Access next URL\n response = client.get(next_url)\n response_json = response.json\n assert len(response_json[\"hits\"][\"hits\"]) == 1\n data = response.json[\"links\"]\n assert data[\"self\"] == next_url\n assert \"next\" not in data\n assert \"prev\" in data and data[\"prev\"] == first_url", "def start_requests(self):\n # This predefined list of URLs is chosen to include all types of\n # inquiries possible in the Austrian parliament in 
order to provide a\n # suitable testing surface for new functions.\n # urls = [\"https://www.parlament.gv.at/PAKT/VHG/XXV/JPR/JPR_00019/index.shtml\", \"https://www.parlament.gv.at/PAKT/VHG/XXV/JPR/JPR_00016/index.shtml\", \"https://www.parlament.gv.at/PAKT/VHG/XXV/J/J_06954/index.shtml\", \"https://www.parlament.gv.at/PAKT/VHG/XXV/M/M_00178/index.shtml\", \"https://www.parlament.gv.at/PAKT/VHG/XXV/JEU/JEU_00003/index.shtml\", \"https://www.parlament.gv.at/PAKT/VHG/XXV/J/J_06758/index.shtml\", \"https://www.parlament.gv.at/PAKT/VHG/BR/J-BR/J-BR_03089/index.shtml\",\n # \"https://www.parlament.gv.at/PAKT/VHG/BR/J-BR/J-BR_03091/index.shtml\", \"http://www.parlament.gv.at/PAKT/VHG/BR/J-BR/J-BR_01155/index.shtml\", \"http://www.parlament.gv.at/PAKT/VHG/XX/J/J_06110/index.shtml\", \"http://www.parlament.gv.at/PAKT/VHG/XX/J/J_06651/index.shtml\", \"http://www.parlament.gv.at/PAKT/VHG/XX/J/J_04024/index.shtml\", \"http://www.parlament.gv.at/PAKT/VHG/XX/J/J_04025/index.shtml\", \"https://www.parlament.gv.at/PAKT/VHG/XX/M/M_00178/index.shtml\"]\n urls = [] if not self.url_override else [self.url_override]\n\n if self.LLP and not self.url_override:\n for i in self.LLP:\n for nrbr in ['NR', 'BR']:\n roman_numeral = roman.toRoman(i)\n options = self.URLOPTIONS.copy()\n options['GP'] = roman_numeral\n options['NRBR'] = nrbr\n url_options = urlencode(options)\n url_llp = \"{}?{}\".format(self.BASE_URL, url_options)\n rss = feedparser.parse(url_llp)\n\n self.logger.info(\"GP {}: {} inquiries from {}\".format(\n roman_numeral, len(rss['entries']), nrbr)\n )\n urls = urls + [entry['link'] for entry in rss['entries']]\n self.TOTAL_COUNTER = len(urls)\n for url in urls:\n yield self.make_requests_from_url(url)", "def setUp(self):\r\n super(ExportTestCase, self).setUp()\r\n self.url = reverse_course_url('export_handler', self.course.id)", "def setUp(self):\n global access_token\n global accountID\n global account_cur\n global api\n # self.maxDiff = None\n try:\n accountID, account_cur, access_token = unittestsetup.auth()\n setattr(sys.modules[\"oandapyV20.oandapyV20\"],\n \"TRADING_ENVIRONMENTS\",\n {\"practice\": {\n \"stream\": \"https://test.com\",\n \"api\": \"https://test.com\",\n }})\n api = API(environment=environment,\n access_token=access_token,\n headers={\"Content-Type\": \"application/json\"})\n api.api_url = 'https://test.com'\n except Exception as e:\n print(\"%s\" % e)\n exit(0)", "def setUp(self):\n class TestHandler(BaseHandler):\n urlconf = 'conman.routes.tests.urls'\n\n self.route = mock.Mock()\n self.request = mock.Mock()\n self.handler = TestHandler(self.route)\n self.view = 'conman.routes.tests.urls.dummy_view'", "def test_api_new_game(self):\n\n with self.client as client:\n ...\n # write a test for this route", "def test_homepage_next(self):\n create_user()\n login(self.app, 'me1', 'password')\n create_equipment()\n create_events()\n\n response1 = self.app.get('/', follow_redirects=True)\n self.assertEqual(response1.status_code, 200)\n response_text = response1.get_data(as_text=True)\n today = datetime.now().strftime('%Y-%m-%d')\n month_range = calendar.monthrange(int(today[0:4]), int(today[5:7]))\n date_current = date(int(today[0:4]), int(today[5:7]), int(today[8:10]))\n self.assertIn(str(date_current.month) + \"/\" + str(date_current.day), response_text)\n response2 = self.app.get('/get_calendar/' + str(date(int(today[0:4]), int(today[5:7]), int(today[8:10]) + 1 if int(today[8:10]) < month_range[1] else 1)))\n response_text = response2.get_data(as_text=True)\n 
self.assertIn(str(date_current.month) + \"/\" + str(date_current.day + 1), response_text)\n self.assertEqual(response2.status_code, 200)\n self.assertIn('Calendar', response_text)\n self.assertIn('Logout', response_text)\n self.assertIn('eq1', response_text)\n self.assertIn('New equipment', response_text)\n self.assertIn('Next Day', response_text)\n self.assertIn('Previous Day', response_text)\n\n self.assertNotIn('Example1', response_text)\n self.assertNotIn('Example2', response_text)", "def test_computed_url(self):\n t = self.create_request_object()\n self.assertEqual(\"studies\", t.url_path())", "def test_smoke_test(self):\n urls = [ ]\n urls.append('/')\n urls.append(reverse('api_doc'))\n urls.append(reverse('laws'))\n urls.append(reverse('issue_list_user', args=['test0']))\n\n for url in urls:\n response = self.client.get(url)\n self.assertEqual(response.status_code , 200)", "def assertRedirects(self, response, expected_url, *args, **kwargs):\n\n # super().assertRedirects(response, expected_url, host=self.tenant.domain_url)\n super().assertRedirects(response, expected_url, *args, **kwargs)", "def test_0020_external(self):\n self.setup_defaults()\n app = self.get_app()\n\n with app.test_request_context('/'):\n self.assertEqual(\n url_for('nereid.website.home', _external=True),\n 'http://localhost/'\n )", "def url_shortner(self):", "def test_can_be_redirected(self):\n\n url = 'http://www.example.com'\n\n r = LiveRedirect(url=url,duration=HALF_DAY)\n r.save()\n\n TEST_URLS = [\n '%s/%s' % (self.live_server_url,r.slug),\n '%s/%s/' % (self.live_server_url,r.slug),\n ]\n\n for url in TEST_URLS:\n\n self.browser.get(url)\n\n body = self.browser.find_element_by_tag_name('body')\n\n # Check that it is not a 404 or 500\n self.assertNotIn('404',body.text)\n self.assertNotIn('500',body.text)\n\n # Slug page should always state what the url is\n self.assertIn(r.url, body.text, 'Link url not displayed on slug page!')\n\n # Slug page should always have a link to the correct page!\n links = self.browser.find_elements_by_tag_name('a')\n\n ok = False\n for link in links:\n if link.get_attribute('href').rstrip('/') == r.url.rstrip('/'):\n ok = True\n break\n\n self.failIf(not ok,'No link to target!')", "def test_visit(self, client, site, landing_page):\n response = client.get(landing_page.relative_url(site))\n assert response.status_code == 200", "def test_get_proxied_url_1(self):\n test_urlpattern = URLPattern(url=\"gale.com\")\n self.assertEqual(test_urlpattern.get_proxied_url, \"gale-com\")", "def setUp(self):\r\n self.client = Client()\r\n self.ping_url = reverse('status.service.celery.ping')", "def setUp(self):\n\n user = User(username=\"Luis\", email=\"dfslkjfijeflkj\")\n user.save()\n lti_profile = LTIProfile.objects.create(\n user=user, name=user.username, anon_id=\"luis123\"\n )\n lti_profile.save()\n\n course = LTICourse(course_name=\"Fake Course\", course_id=\"BlueMonkeyFake\")\n course.save()\n course.course_admins.add(lti_profile)\n\n self.assignment = Assignment(\n assignment_name=\"Test\", pagination_limit=10, course=course\n )\n self.assignment.save()\n\n self.tod = TargetObject(\n target_title=\"TObj2\",\n target_author=\"Test Author\",\n target_content=\"Fake Content2\",\n target_citation=\"Fake Citation2\",\n target_type=\"tx\",\n )\n self.tod.save()\n\n self.aTarget = AssignmentTargets(\n assignment=self.assignment,\n target_object=self.tod,\n order=1,\n target_external_css=\"\",\n target_instructions=\"Fake Instructions\",\n target_external_options=\"\",\n )\n 
self.aTarget.save()\n\n self.target_path = reverse(\"hx_lti_initializer:launch_lti\")\n self.launch_url = \"http://testserver{}\".format(self.target_path)\n self.resource_link_id = \"some_string_to_be_the_fake_resource_link_id\"\n\n # set the starting resource\n lti_resource_link_config = LTIResourceLinkConfig.objects.create(\n resource_link_id=self.resource_link_id,\n assignment_target=self.aTarget,\n )\n\n self.consumer = ToolConsumer(\n consumer_key=settings.CONSUMER_KEY,\n consumer_secret=settings.LTI_SECRET,\n launch_url=self.launch_url,\n params={\n \"lti_message_type\": \"basic-lti-launch-request\",\n \"lti_version\": \"LTI-1p0\",\n \"resource_link_id\": self.resource_link_id,\n \"lis_person_sourcedid\": lti_profile.name,\n \"lis_outcome_service_url\": \"fake_url\",\n \"user_id\": lti_profile.anon_id,\n \"roles\": [\"Learner\"],\n \"context_id\": course.course_id,\n },\n )\n self.lti_params = self.consumer.generate_launch_data()", "def get_absolute_url(self):\n return reverse('trialResponse-detail', args=[str(self.responseId)])", "def main(self):\n base_url = self.env.get(\"base_url\", BASE_URL)\n self.env[\"url\"] = self.get_opera_url(base_url)\n self.output(\"Found URL %s\" % self.env[\"url\"])", "def new_journey(self, starting_point, end_point, mode=None):\n self.starting_point = starting_point\n self.end_point = end_point\n self.mode = mode if mode else self.default_mode\n self._heading = \"\"\n self._footer = \"\"\n self._steps = []\n self._found = False\n\n # Let's make sure that mode is valid\n if self.mode.lower() not in self.valid_modes:\n self._heading = \"The mode of travel must be either {}.\".format(\n \", \".join(x for x in self.valid_modes[:-1]) + \" or \" + self.valid_modes[-1]\n )\n return self\n\n # Grab the directions, check for an error\n try:\n self._directions = Directions().directions(self.starting_point, self.end_point, self.mode)\n except (NoResults, InvalidRequest, GmapException) as e:\n self._heading = \"We couldn't find ({}) directions from: {}, to {}.\".format(\n self.mode,\n self.starting_point,\n self.end_point\n )\n except (RateLimitExceeded, RequestDenied) as e:\n self._heading = \"Google is a little busy at the moment, or for some reason our request has been \" \\\n \"denied. Wait a while, and then try again.\"\n else:\n if self._directions:\n self._found = True\n self._heading = \"These are the steps for the ({}) journey from {} to {}.\".format(\n self.mode,\n self._directions[0]['legs'][0]['start_address'],\n self._directions[0]['legs'][0]['end_address'],\n )\n self._steps = [\n \"{:3}. 
{} ({} / {})\".format(\n counter + 1,\n Markup(step['html_instructions']).striptags(),\n step['distance']['text'],\n step['duration']['text']\n ) for counter, step in enumerate(self._directions[0]['legs'][0]['steps'])\n ]\n self._footer = self._directions[0]['copyrights']\n\n return self", "def test_main_overview_task_url(self):\n\n # change config\n set_main_overview('task')\n\n # login testuser\n self.client.login(\n username='testuser_main_overview', password='RYgxCfV2NRcfXlJvsSHP'\n )\n # get reverse url\n url = reverse('main_overview')\n # compare url\n self.assertEqual(url, '/main_overview/')\n # create url\n destination = urllib.parse.quote('/task/')\n # get response\n response = self.client.get('/main_overview/')\n # compare redirect\n self.assertRedirects(\n response, destination, status_code=302, target_status_code=200\n )", "def setUp(self):\n self.response = self.client.get('/map/')", "def test_url_construction(self):\n\n a = api.InvenTreeAPI(\"http://localhost:1234\", connect=False)\n\n tests = {\n 'part': 'http://localhost:1234/api/part/',\n '/part': 'http://localhost:1234/api/part/',\n '/part/': 'http://localhost:1234/api/part/',\n 'order/so/shipment': 'http://localhost:1234/api/order/so/shipment/',\n }\n\n for endpoint, url in tests.items():\n self.assertEqual(a.constructApiUrl(endpoint), url)", "def test_get_main_route():\n response = client.get(url)\n assert response.status_code == 200", "def test_analytics_proxy(self, act):\r\n act.return_value = self.FakeProxyResponse()\r\n\r\n url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {\r\n 'aname': 'ProblemGradeDistribution'\r\n })\r\n self.assertEqual(response.status_code, 200)\r\n\r\n # check response\r\n self.assertTrue(act.called)\r\n expected_res = {'test_content': \"robot test content\"}\r\n self.assertEqual(json.loads(response.content), expected_res)", "def test_get_from_another_way(self):\n url = reverse('route', kwargs={'way_id': 101, 'route_id': self.route.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)", "def setUp(self):\n h = self.MyTestHandler()\n h.request = Request.blank('/rpc/')\n h.response = Response()\n self.handler = h", "def test_can_create_redirect(self):\n\n #Homepage\n self.browser.get(self.live_server_url)\n\n #Look for the submit button and the two inputs\n url_field = self.browser.find_element_by_id('id_url')\n\n #duration is a slider now, so find that\n duration_field = self.browser.find_element_by_id('slider')\n\n # Enter something into the inputs\n url_field.send_keys('www.example.com')\n duration_field.send_keys(Keys.RIGHT)\n\n submit_button = self.browser.find_element_by_tag_name('input')\n submit_button.submit()\n\n # Ensure that the submit doesn't redirect the user somewhere stupid\n body = self.browser.find_element_by_tag_name('body')\n\n # Check that it is not a 404 or 500\n self.assertNotIn('404',body.text)\n self.assertNotIn('500',body.text)", "def test_no_tour(self):\n mock_request = Mock(user=self.test_user, path='mock2', method='get', GET={})\n mock_view = MockView(request=mock_request)\n response = mock_view.dispatch(mock_request)\n self.assertEqual(200, response.status_code)", "def test_redirect_view(self):\n # TODO: Get test to work.\n client = Client()\n #response = client.get(reverse(testurl))\n #self.assertEqual(301, response.status_code)", "def setUp(self) -> None:\n self.url = '{}/places_search'.format(api_url)", "def __init__(self, testcase, url, 
method='GET'):\n self.testcase = testcase\n logging.handlers.HTTPHandler.__init__(self, 'testserver', url, method)", "def test_get_goals(self):\n pass", "def test_get_next_url_not_none():\n # setup\n next_link = get_urls.get_next_url(constants[\"URLS\"][\"TESTED_URL\"])\n assert next_link # not None", "def setUp(self):\r\n super(CourseGraderUpdatesTest, self).setUp()\r\n self.url = get_url(self.course.id, 'grading_handler')\r\n self.starting_graders = CourseGradingModel(self.course).graders", "def test_main(self):\n path = reverse(\"main\")\n request = RequestFactory().get(path)\n response = index(request)\n assert response.status_code == 200", "def setUp(self):\n self.url = \"https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971\"", "def setUp(self):\n self.url = \"https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971\"", "def url(self):\n ...", "def test_url_existence(self):\n self.assertEquals(self.response.status_code, 200)", "def step_impl(context, query):\n url = context.base_url+query\n print('url:',url,'\\n')\n with closing(requests.get(url)) as response:\n context.response = response\n context.response_json = response.json()", "def test_01_link_object(self):\r\n # For app\r\n res = self.app.get(\"/api/app\", follow_redirects=True)\r\n output = json.loads(res.data)[0]\r\n err_msg = \"There should be a Link with the object URI\"\r\n assert output['link'] is not None, err_msg\r\n app_link = self.hateoas.link(rel='self', title='app',\r\n href='http://localhost/api/app/1')\r\n\r\n err_msg = \"The object link is wrong: %s\" % output['link']\r\n assert app_link == output['link'], err_msg\r\n\r\n err_msg = \"There should be a Links list with the category URI\"\r\n assert output['links'] is not None, err_msg\r\n assert len(output['links']) == 1, err_msg\r\n app_link = self.hateoas.link(rel='category', title='category',\r\n href='http://localhost/api/category/1')\r\n assert app_link == output['links'][0], err_msg\r\n\r\n # For task\r\n res = self.app.get(\"/api/task\", follow_redirects=True)\r\n output = json.loads(res.data)[0]\r\n err_msg = \"There should be a Link with the object URI\"\r\n assert output['link'] is not None, err_msg\r\n task_link = self.hateoas.link(rel='self', title='task',\r\n href='http://localhost/api/task/1')\r\n err_msg = \"The object link is wrong: %s\" % output['link']\r\n assert task_link == output['link'], err_msg\r\n err_msg = \"There should be one parent link: app\"\r\n assert output.get('links') is not None, err_msg\r\n assert len(output.get('links')) == 1, err_msg\r\n err_msg = \"The parent link is wrong\"\r\n app_link = self.hateoas.link(rel='parent', title='app',\r\n href='http://localhost/api/app/1')\r\n assert output.get('links')[0] == app_link, err_msg\r\n\r\n # For taskrun\r\n res = self.app.get(\"/api/taskrun\", follow_redirects=True)\r\n output = json.loads(res.data)[0]\r\n err_msg = \"There should be a Link with the object URI\"\r\n assert output['link'] is not None, err_msg\r\n task_link = self.hateoas.link(rel='self', title='taskrun',\r\n href='http://localhost/api/taskrun/1')\r\n err_msg = \"The object link is wrong: %s\" % output['link']\r\n assert task_link == output['link'], err_msg\r\n err_msg = \"There should be two parent links: app and task\"\r\n assert output.get('links') is not None, err_msg\r\n assert len(output.get('links')) == 2, err_msg\r\n err_msg = \"The parent app link is wrong\"\r\n app_link = 
self.hateoas.link(rel='parent', title='app',\r\n href='http://localhost/api/app/1')\r\n assert output.get('links')[0] == app_link, err_msg\r\n\r\n err_msg = \"The parent task link is wrong\"\r\n app_link = self.hateoas.link(rel='parent', title='task',\r\n href='http://localhost/api/task/1')\r\n assert output.get('links')[1] == app_link, err_msg\r\n\r\n # Check that hateoas removes all link and links from item\r\n without_links = self.hateoas.remove_links(output)\r\n err_msg = \"There should not be any link or links keys\"\r\n assert without_links.get('link') is None, err_msg\r\n assert without_links.get('links') is None, err_msg\r\n\r\n # For category\r\n res = self.app.get(\"/api/category\", follow_redirects=True)\r\n output = json.loads(res.data)[0]\r\n err_msg = \"There should be a Link with the object URI\"\r\n assert output['link'] is not None, err_msg\r\n category_link = self.hateoas.link(rel='self', title='category',\r\n href='http://localhost/api/category/1')\r\n err_msg = \"The object link is wrong: %s\" % output['link']\r\n assert category_link == output['link'], err_msg\r\n err_msg = \"There should be no other links\"\r\n assert output.get('links') is None, err_msg\r\n err_msg = \"The object links should are wrong\"\r\n\r\n # For user\r\n # Pending define what user fields will be visible through the API\r\n # Issue #626. For now let's suppose link and links are not visible\r\n # res = self.app.get(\"/api/user?api_key=\" + self.root_api_key, follow_redirects=True)\r\n # output = json.loads(res.data)[0]\r\n # err_msg = \"There should be a Link with the object URI\"\r\n # assert output['link'] is not None, err_msg\r\n # user_link = self.hateoas.link(rel='self', title='user',\r\n # href='http://localhost/api/user/1')\r\n # err_msg = \"The object link ir wrong: %s\" % output['link']\r\n # assert user_link == output['link'], err_msg\r\n # # when the links specification of a user will be set, modify the following\r\n # err_msg = \"The list of links should be empty for now\"\r\n # assert output.get('links') == None, err_msg\r", "def test_create_ride_resolves(self):\n url = reverse('create_ride')\n self.assertEquals(resolve(url).func, create_ride) # pylint: disable=deprecated-method", "def default_setup(self, mocker):\n # pylama: ignore=W0201\n self.url = '/api/v0/publish'\n self.client = wsgi.application.test_client()\n self._retryable = mocker.patch.object(wsgi, '_retryable')", "def _test_good(self,\n the_request_method,\n the_request_uri,\n the_request_headers,\n the_request_body,\n the_response_code,\n the_response_headers,\n the_response_body,\n the_response_content_type):\n\n the_response_is_ok = True\n the_request_principal = \"das@example.com\"\n\n def async_app_service_forwarder_forward_patch(http_client, request, callback):\n self.assertIsNotNone(request)\n\n expected_url = \"http://%s%s\" % (\n self.__class__._app_service,\n the_request_uri\n )\n self.assertEqual(request.url, expected_url)\n\n self.assertIsNotNone(request.method)\n self.assertEqual(request.method, the_request_method)\n\n self.assertIsNotNone(request.headers)\n self.assertEqual(len(request.headers), 1 + len(the_request_headers))\n expected_headers = tornado.httputil.HTTPHeaders(the_request_headers)\n expected_headers[\"Authorization\"] = \"%s %s\" % (\n self.__class__._app_service_auth_method,\n the_request_principal)\n self.assertEqual(request.headers, expected_headers)\n\n response = mock.Mock()\n response.error = None\n response.code = the_response_code\n response.body = the_response_body\n response.headers 
= tornado.httputil.HTTPHeaders(the_response_headers)\n if response.body:\n response.headers[\"Content-type\"] = the_response_content_type\n response.headers[\"Content-length\"] = str(len(response.body))\n response.request_time = 24\n callback(response)\n\n def on_async_app_service_forward_done(is_ok,\n http_status_code,\n headers,\n body):\n\n self.assertIsNotNone(is_ok)\n self.assertEqual(is_ok, the_response_is_ok)\n\n if not is_ok:\n return\n\n self.assertIsNotNone(http_status_code)\n self.assertEqual(http_status_code, the_response_code)\n\n self.assertIsNotNone(headers)\n\n if the_response_body is None:\n self.assertIsNone(body)\n\n self.assertEqual(headers, the_response_headers)\n else:\n self.assertIsNotNone(body)\n self.assertEqual(body, the_response_body)\n\n self.assertEqual(len(headers), 2 + len(the_response_headers))\n the_expected_headers = tornado.httputil.HTTPHeaders(the_response_headers)\n the_expected_headers[\"Content-type\"] = the_response_content_type\n the_expected_headers[\"Content-length\"] = str(len(body))\n self.assertEqual(headers, the_expected_headers)\n\n name_of_method_to_patch = \"tornado.httpclient.AsyncHTTPClient.fetch\"\n with mock.patch(name_of_method_to_patch, async_app_service_forwarder_forward_patch):\n aasf = async_app_service_forwarder.AsyncAppServiceForwarder(\n the_request_method,\n the_request_uri,\n the_request_headers,\n the_request_body,\n the_request_principal)\n aasf.forward(on_async_app_service_forward_done)", "def step_impl(context):\n context.driver.get(context.base_url)", "def setUpClass(cls) -> None:\n cls.result = Tracer('https://google.com').trace()", "def __call__(self):\r\n self.init_data = td.import_data(self.__module__)\r\n self.page1() # GET navigation (requests 101-153)\r\n\r\n grinder.sleep(20)\r\n self.page2() # GET case (requests 201-252)\r\n\r\n grinder.sleep(20)\r\n self.page3() # GET view (requests 301-365)\r\n\r\n grinder.sleep(20)\r\n self.page4() # POST view (requests 401-452)\r", "def test_urls(self):\n base_test_url = 'http://{}:{}/'.format(TESTING_CONFIG['host'],\n TESTING_CONFIG['port'])\n self.conn._host_url == base_test_url\n self.conn.aheader_url == base_test_url + 'analysis_header'\n self.conn.atail_url == base_test_url + 'analysis_tail'\n self.conn.dref_url == base_test_url + 'data_reference'\n self.conn.dref_header_url == base_test_url + 'data_reference_header'", "async def test_recordings_proxy_view_success(hass_client_local_frigate: Any) -> None:\n\n resp = await hass_client_local_frigate.get(\"/api/frigate/recordings/present\")\n assert resp.status == HTTP_OK\n\n resp = await hass_client_local_frigate.get(\"/api/frigate/recordings/not_present\")\n assert resp.status == HTTP_NOT_FOUND", "def __init__(self, base_url, start_urls, config, helper_outfile, verbose):\n\n # setup class variables\n self.base_url = base_url\n self.config = config\n self.helper_outfile = helper_outfile\n self.verbose = verbose\n self.found_urls = set()\n self.crawled_urls = {}\n self.crawled_paths = {}\n self.param_infos = {}\n self.helper_pid = None\n self.found_cookies = []\n self.comments = {}\n self.redirects = {}\n self.driver = None\n\n # figure out domain\n parsed_url = urllib.parse.urlparse(base_url)\n self.domain = parsed_url.hostname\n self.port = parsed_url.port\n if not self.port:\n self.port = 80 if parsed_url.scheme == \"http\" else 443\n self.protocol_prefix = \"%s://\" % parsed_url.scheme\n\n # compile exclude path regexes from config\n self.exclude_paths = []\n if self.config.get(\"exclude_paths\", \"\"):\n 
exclude_paths_str = util.parse_as_csv(self.config.get(\"exclude_paths\", \"\"))\n for path_str in exclude_paths_str:\n self.exclude_paths.append(re.compile(path_str))\n\n # parse cookies from config\n self.cookies = {}\n for key_val_pair in self.config[\"cookie_str\"].split(\";\"):\n if not key_val_pair:\n continue\n if \"=\" not in key_val_pair:\n self.cookies[key_val_pair.strip()] = \"\"\n else:\n key, val = key_val_pair.strip().split(\"=\")\n self.cookies[key.strip()] = val.strip()\n\n # setup start urls\n self.start_urls = set([base_url])\n for url in start_urls:\n # skip paths that are excluded from crawling\n if self.exclude_paths and url.count(\"/\") > 2:\n check_str = \"/\" + \"/\".join(url.split(\"/\")[3:])\n if any(re_path.match(check_str) for re_path in self.exclude_paths):\n continue\n self.start_urls.add(url)\n self.start_urls = list(self.start_urls)\n\n # create unix socket for IPC with crawler helper\n if os.path.exists(UNIX_SOCK_ADDR):\n os.remove(UNIX_SOCK_ADDR)\n self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n self.socket.bind(UNIX_SOCK_ADDR)\n\n # setup selenium if it is configured to be used\n if config[\"use_selenium\"].lower() == \"true\":\n import logging\n logging.getLogger(\"seleniumwire\").setLevel(logging.ERROR)\n from seleniumwire import webdriver\n from selenium.webdriver.chrome.options import Options\n chrome_options = Options()\n chrome_options.add_argument(\"--headless\")\n chrome_options.add_argument(\"--user-agent=%s\" % self.config[\"user_agent\"])\n\n # on Linux running Selenium as root requires '--no-sandbox' option\n if os.geteuid() == 0 and sys.platform.startswith(\"linux\"):\n chrome_options.add_argument(\"--no-sandbox\")\n self.driver = webdriver.Chrome(options=chrome_options)\n\n # disallow downloads via Selenium (see https://stackoverflow.com/a/47366981)\n self.driver.command_executor._commands[\"send_command\"] = (\"POST\", \"/session/$sessionId/chromium/send_command\")\n params = {\"cmd\": \"Page.setDownloadBehavior\", \"params\": {\"behavior\": \"disallow\", \"downloadPath\": \"\"}}\n command_result = self.driver.execute(\"send_command\", params)\n\n # add cookies\n self.driver.get(self.base_url) # initial request required to add cookies\n self.driver.delete_all_cookies()\n for key, val in self.cookies.items():\n self.driver.add_cookie({\"name\": key, \"value\": val, \"domain\": self.domain})", "def test_computed_url(self):\n t = TwoHundredRequest()\n self.assertEqual(\"twohundred\", t.url_path())", "def test_get_learners(self):\n pass", "def parse_steps(self, response):\n response_link = []\n inquiry_item = response.meta['inquiry_item']\n\n # Get or created a default-phase for inquiries, because there are no phases in\n # simple inquiries.\n phase_item, created = Phase.objects.get_or_create(\n title='default_inqu')\n if created:\n log.msg(u\"Created Phase {}\".format(\n green(u'[{}]'.format(phase_item.title))),level=log.DEBUG)\n\n steps = INQUIRY.STEPS.xt(response)\n\n for step in steps:\n if \"Schriftliche Beantwortung\" in step[\"title\"]:\n response_link = INQUIRY.RESPONSE_LINK.xt(response)\n\n for step in steps:\n step_item, created = Step.objects.update_or_create(\n title=step['title'],\n sortkey=step['sortkey'],\n date=step['date'],\n protocol_url=step['protocol_url'],\n law=inquiry_item,\n phase=phase_item,\n source_link=response.url\n )\n step_item.save()\n if response_link:\n return response_link\n else:\n return", "def test_get(self):\n john_gamer = Gamer(self.john)\n john_gamer.gamer.set_new_location(50, 50)\n 
john_gamer.tasks.start(1)\n\n # in game field, in game, 2 tasks started:\n user_1 = ActiveUser.create()\n gamer_1 = Gamer(user_1)\n gamer_1.gamer.set_new_location(45, 40)\n gamer_1.tasks.start(1)\n gamer_1.tasks.start(2)\n\n # in game field, in game, 1 task started:\n user_2 = ActiveUser.create()\n gamer_2 = Gamer(user_2)\n gamer_2.gamer.set_new_location(55, 60)\n gamer_2.tasks.start(3)\n\n self.client.force_login(self.john)\n\n resp = self.client.get(self.URL)\n\n with self.subTest(\"Test status is correct\"):\n self.assertEqual(\n resp.status_code, status.HTTP_200_OK,\n )\n\n with self.subTest(\"Test response is not empty\"):\n # as we tested 'show_game_field' method in details, here we run smoke test only:\n self.assertNotEqual(\n resp.json(),\n []\n )", "def setUp(self):\n\n c = Client()\n self.response = c.get('/')\n self.content = self.response.content", "def test_show_ride_resolves(self):\n url = reverse('showridepage', args=['078508ce-2efc-4316-8987-12b9551be5b4'])\n self.assertEquals(resolve(url).func, show_ride) # pylint: disable=deprecated-method", "def test_with_links_cases_and_issues():\n pass", "def test_api_urls():\n # Test the status message - 404 not good , 200 good\n assert API_RH.create_req().status_code == 200, \"The tests for URLs were successful\"" ]
[ "0.6109293", "0.6100165", "0.6068033", "0.60075814", "0.5958549", "0.5953743", "0.5904192", "0.5858552", "0.5848845", "0.5810708", "0.57947206", "0.57568485", "0.57560605", "0.5703684", "0.5696326", "0.56882894", "0.5676402", "0.5666247", "0.5610928", "0.5601853", "0.5570623", "0.55682325", "0.5514111", "0.5511874", "0.5505077", "0.5502392", "0.5487443", "0.5481906", "0.54800403", "0.54793096", "0.5478554", "0.54550904", "0.5449248", "0.54488957", "0.54486907", "0.54480445", "0.5434043", "0.54283935", "0.5425634", "0.54185826", "0.5411023", "0.5402351", "0.53953403", "0.5394745", "0.53841364", "0.5382407", "0.5378793", "0.5367154", "0.5359688", "0.5356297", "0.53548163", "0.53521115", "0.5346454", "0.533535", "0.53194207", "0.5316885", "0.5313143", "0.53089625", "0.5302877", "0.5299354", "0.5298943", "0.5297605", "0.5291475", "0.52890366", "0.52820796", "0.5281581", "0.5280808", "0.52796215", "0.52712464", "0.52645415", "0.5263534", "0.5256092", "0.52482015", "0.52404803", "0.5232341", "0.5230364", "0.5225445", "0.521568", "0.5212784", "0.5212784", "0.5211098", "0.5202019", "0.51997703", "0.5191189", "0.5187537", "0.51852196", "0.51804876", "0.51791036", "0.51758003", "0.5175678", "0.51695603", "0.5169339", "0.5168494", "0.51639295", "0.5163523", "0.51622695", "0.5153458", "0.5148756", "0.51481026", "0.51426816", "0.514189" ]
0.0
-1